Commit 582f2e7

Show PetSets in oc status
Make a minor change to prepare for generic controller references (since PetSets and RCs could conflict over pods).
1 parent 96f7092 · commit 582f2e7
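
The "conflict over pods" concern in the message is plain label-selector overlap: nothing prevents an RC and a PetSet in the same namespace from selecting the same pod, which is why the per-kind ManagedByRC edge becomes a single ManagedByController edge kind in the changes below. A minimal sketch of that overlap, using the same k8s label-selector helpers the edge code relies on (the pod labels and selector values are invented for illustration):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// A pod's labels, plus the selectors of an RC and a PetSet in the same namespace.
	podLabels := labels.Set{"app": "db", "tier": "backend"}
	rcSelector := labels.SelectorFromSet(labels.Set{"app": "db"})
	petSetSelector := labels.SelectorFromSet(labels.Set{"app": "db", "tier": "backend"})

	// Both controllers select the pod, so the graph would carry a
	// ManagedByController edge from the pod to each of them.
	fmt.Println("RC selects pod:    ", rcSelector.Matches(podLabels))
	fmt.Println("PetSet selects pod:", petSetSelector.Matches(podLabels))
}

With one shared edge kind the graph can record both relationships and leave conflict resolution to later controller-reference work (see the TODO in petset.go below).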

18 files changed (+782, -51 lines)


pkg/api/graph/graphview/dc_pipeline.go

+1 -1

@@ -80,5 +80,5 @@ type SortedDeploymentConfigPipeline []DeploymentConfigPipeline
 func (m SortedDeploymentConfigPipeline) Len() int { return len(m) }
 func (m SortedDeploymentConfigPipeline) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
 func (m SortedDeploymentConfigPipeline) Less(i, j int) bool {
-    return CompareObjectMeta(&m[i].Deployment.ObjectMeta, &m[j].Deployment.ObjectMeta)
+    return CompareObjectMeta(&m[i].Deployment.DeploymentConfig.ObjectMeta, &m[j].Deployment.DeploymentConfig.ObjectMeta)
 }

pkg/api/graph/graphview/petset.go

+51

@@ -0,0 +1,51 @@
+package graphview
+
+import (
+    osgraph "github.com/openshift/origin/pkg/api/graph"
+    kubeedges "github.com/openshift/origin/pkg/api/kubegraph"
+    kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes"
+)
+
+type PetSet struct {
+    PetSet *kubegraph.PetSetNode
+
+    OwnedPods   []*kubegraph.PodNode
+    CreatedPods []*kubegraph.PodNode
+
+    // TODO: handle conflicting once controller refs are present, not worth it yet
+}
+
+// AllPetSets returns all the PetSets that aren't in the excludes set and the set of covered NodeIDs
+func AllPetSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]PetSet, IntSet) {
+    covered := IntSet{}
+    views := []PetSet{}
+
+    for _, uncastNode := range g.NodesByKind(kubegraph.PetSetNodeKind) {
+        if excludeNodeIDs.Has(uncastNode.ID()) {
+            continue
+        }
+
+        view, covers := NewPetSet(g, uncastNode.(*kubegraph.PetSetNode))
+        covered.Insert(covers.List()...)
+        views = append(views, view)
+    }
+
+    return views, covered
+}
+
+// NewPetSet returns the PetSet and a set of all the NodeIDs covered by the PetSet
+func NewPetSet(g osgraph.Graph, node *kubegraph.PetSetNode) (PetSet, IntSet) {
+    covered := IntSet{}
+    covered.Insert(node.ID())
+
+    view := PetSet{}
+    view.PetSet = node
+
+    for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) {
+        podNode := uncastPodNode.(*kubegraph.PodNode)
+        covered.Insert(podNode.ID())
+        view.OwnedPods = append(view.OwnedPods, podNode)
+    }
+
+    return view, covered
+}
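
How this view is consumed isn't visible in this excerpt; the oc status describer changes are among the other files in the commit. A rough sketch of the intended usage, where describeStandalonePetSets is a hypothetical helper (not code from this commit) and the graph is assumed to already have ManagedByController edges added:

package graphview

import (
	"fmt"

	osgraph "github.com/openshift/origin/pkg/api/graph"
)

// describeStandalonePetSets is a hypothetical example: it assumes
// kubeedges.AddAllManagedByControllerPodEdges has already been run on g and
// that alreadyCovered holds the node IDs claimed by service groups.
func describeStandalonePetSets(g osgraph.Graph, alreadyCovered IntSet) {
	petSets, covered := AllPetSets(g, alreadyCovered)
	alreadyCovered.Insert(covered.List()...)

	for _, view := range petSets {
		// PetSetNode carries the underlying kapps.PetSet in its PetSet field
		// (see the edges.go change below, which reads cast.PetSet.Namespace).
		fmt.Printf("petset %s manages %d pods\n", view.PetSet.PetSet.Name, len(view.OwnedPods))
	}
}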

pkg/api/graph/graphview/rc.go

+2 -2

@@ -60,13 +60,13 @@ func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationCont
     rcView.RC = rcNode
     rcView.ConflictingRCIDToPods = map[int][]*kubegraph.PodNode{}

-    for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) {
+    for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) {
         podNode := uncastPodNode.(*kubegraph.PodNode)
         covered.Insert(podNode.ID())
         rcView.OwnedPods = append(rcView.OwnedPods, podNode)

         // check to see if this pod is managed by more than one RC
-        uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind)
+        uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind)
         if len(uncastOwningRCs) > 1 {
             for _, uncastOwningRC := range uncastOwningRCs {
                 if uncastOwningRC.ID() == rcNode.ID() {

pkg/api/graph/graphview/service_group.go

+15 -3

@@ -21,10 +21,13 @@ type ServiceGroup struct {

     DeploymentConfigPipelines []DeploymentConfigPipeline
     ReplicationControllers []ReplicationController
+    PetSets []PetSet

-    FulfillingDCs []*deploygraph.DeploymentConfigNode
-    FulfillingRCs []*kubegraph.ReplicationControllerNode
-    FulfillingPods []*kubegraph.PodNode
+    // TODO: this has to stop
+    FulfillingPetSets []*kubegraph.PetSetNode
+    FulfillingDCs []*deploygraph.DeploymentConfigNode
+    FulfillingRCs []*kubegraph.ReplicationControllerNode
+    FulfillingPods []*kubegraph.PodNode

     ExposingRoutes []*routegraph.RouteNode
 }
@@ -66,6 +69,8 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi
             service.FulfillingRCs = append(service.FulfillingRCs, castContainer)
         case *kubegraph.PodNode:
             service.FulfillingPods = append(service.FulfillingPods, castContainer)
+        case *kubegraph.PetSetNode:
+            service.FulfillingPetSets = append(service.FulfillingPetSets, castContainer)
         default:
             utilruntime.HandleError(fmt.Errorf("unrecognized container: %v", castContainer))
         }
@@ -97,6 +102,13 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi
         service.ReplicationControllers = append(service.ReplicationControllers, rcView)
     }

+    for _, fulfillingPetSet := range service.FulfillingPetSets {
+        view, covers := NewPetSet(g, fulfillingPetSet)
+
+        covered.Insert(covers.List()...)
+        service.PetSets = append(service.PetSets, view)
+    }
+
     for _, fulfillingPod := range service.FulfillingPods {
         _, podCovers := NewPod(g, fulfillingPod)
         covered.Insert(podCovers.List()...)

pkg/api/graph/graphview/veneering_test.go

+3 -3

@@ -78,7 +78,7 @@ func TestBareRCGroup(t *testing.T) {

     kubeedges.AddAllExposedPodTemplateSpecEdges(g)
     kubeedges.AddAllExposedPodEdges(g)
-    kubeedges.AddAllManagedByRCPodEdges(g)
+    kubeedges.AddAllManagedByControllerPodEdges(g)

     coveredNodes := IntSet{}

@@ -399,7 +399,7 @@ func TestGraph(t *testing.T) {
     }

     for _, bareDCPipeline := range bareDCPipelines {
-        t.Logf("from %s", bareDCPipeline.Deployment.Name)
+        t.Logf("from %s", bareDCPipeline.Deployment.DeploymentConfig.Name)
         for _, path := range bareDCPipeline.Images {
             t.Logf(" %v", path)
         }
@@ -413,7 +413,7 @@ func TestGraph(t *testing.T) {
     indent := " "

     for _, deployment := range serviceGroup.DeploymentConfigPipelines {
-        t.Logf("%sdeployment %s", indent, deployment.Deployment.Name)
+        t.Logf("%sdeployment %s", indent, deployment.Deployment.DeploymentConfig.Name)
         for _, image := range deployment.Images {
             t.Logf("%s image %s", indent, image.Image.ImageSpec())
             if image.Build != nil {

pkg/api/kubegraph/analysis/rc.go

+2 -2

@@ -21,11 +21,11 @@ func FindDuelingReplicationControllers(g osgraph.Graph, f osgraph.Namer) []osgra
     for _, uncastRCNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) {
         rcNode := uncastRCNode.(*kubegraph.ReplicationControllerNode)

-        for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByRCEdgeKind) {
+        for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) {
            podNode := uncastPodNode.(*kubegraph.PodNode)

            // check to see if this pod is managed by more than one RC
-           uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByRCEdgeKind)
+           uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind)
            if len(uncastOwningRCs) > 1 {
                involvedRCNames := []string{}
                relatedNodes := []graph.Node{uncastPodNode}

pkg/api/kubegraph/analysis/rc_test.go

+1 -1

@@ -14,7 +14,7 @@ func TestDuelingRC(t *testing.T) {
        t.Fatalf("unexpected error: %v", err)
     }

-    kubeedges.AddAllManagedByRCPodEdges(g)
+    kubeedges.AddAllManagedByControllerPodEdges(g)

     markers := FindDuelingReplicationControllers(g, osgraph.DefaultNamer)
     if e, a := 2, len(markers); e != a {

pkg/api/kubegraph/edge_test.go

+15 -1

@@ -8,6 +8,8 @@ import (

     kapi "k8s.io/kubernetes/pkg/api"
     _ "k8s.io/kubernetes/pkg/api/install"
+    "k8s.io/kubernetes/pkg/api/unversioned"
+    kapps "k8s.io/kubernetes/pkg/apis/apps"
     "k8s.io/kubernetes/pkg/apis/autoscaling"
     "k8s.io/kubernetes/pkg/runtime"

@@ -38,6 +40,14 @@ func TestNamespaceEdgeMatching(t *testing.T) {
     rc.Spec.Selector = map[string]string{"a": "1"}
     kubegraph.EnsureReplicationControllerNode(g, rc)

+    p := &kapps.PetSet{}
+    p.Namespace = namespace
+    p.Name = "the-petset"
+    p.Spec.Selector = &unversioned.LabelSelector{
+        MatchLabels: map[string]string{"a": "1"},
+    }
+    kubegraph.EnsurePetSetNode(g, p)
+
     svc := &kapi.Service{}
     svc.Namespace = namespace
     svc.Name = "the-svc"
@@ -49,7 +59,7 @@ func TestNamespaceEdgeMatching(t *testing.T) {
     fn("other", g)
     AddAllExposedPodEdges(g)
     AddAllExposedPodTemplateSpecEdges(g)
-    AddAllManagedByRCPodEdges(g)
+    AddAllManagedByControllerPodEdges(g)

     for _, edge := range g.Edges() {
         nsTo, err := namespaceFor(edge.To())
@@ -79,6 +89,10 @@ func namespaceFor(node graph.Node) (string, error) {
         return node.(*kubegraph.PodSpecNode).Namespace, nil
     case *kapi.ReplicationControllerSpec:
         return node.(*kubegraph.ReplicationControllerSpecNode).Namespace, nil
+    case *kapps.PetSetSpec:
+        return node.(*kubegraph.PetSetSpecNode).Namespace, nil
+    case *kapi.PodTemplateSpec:
+        return node.(*kubegraph.PodTemplateSpecNode).Namespace, nil
     default:
         return "", fmt.Errorf("unknown object: %#v", obj)
     }

pkg/api/kubegraph/edges.go

+17 -12

@@ -20,8 +20,8 @@ import (
 const (
     // ExposedThroughServiceEdgeKind goes from a PodTemplateSpec or a Pod to Service. The head should make the service's selector.
     ExposedThroughServiceEdgeKind = "ExposedThroughService"
-    // ManagedByRCEdgeKind goes from Pod to ReplicationController when the Pod satisfies the ReplicationController's label selector
-    ManagedByRCEdgeKind = "ManagedByRC"
+    // ManagedByControllerEdgeKind goes from Pod to controller when the Pod satisfies a controller's label selector
+    ManagedByControllerEdgeKind = "ManagedByController"
     // MountedSecretEdgeKind goes from PodSpec to Secret indicating that is or will be a request to mount a volume with the Secret.
     MountedSecretEdgeKind = "MountedSecret"
     // MountableSecretEdgeKind goes from ServiceAccount to Secret indicating that the SA allows the Secret to be mounted
@@ -91,31 +91,36 @@ func AddAllExposedPodEdges(g osgraph.MutableUniqueGraph) {
     }
 }

-// AddManagedByRCPodEdges ensures that a directed edge exists between an RC and all the pods
+// AddManagedByControllerPodEdges ensures that a directed edge exists between a controller and all the pods
 // in the graph that match the label selector
-func AddManagedByRCPodEdges(g osgraph.MutableUniqueGraph, rcNode *kubegraph.ReplicationControllerNode) {
-    if rcNode.Spec.Selector == nil {
+func AddManagedByControllerPodEdges(g osgraph.MutableUniqueGraph, to graph.Node, namespace string, selector map[string]string) {
+    if selector == nil {
        return
     }
-    query := labels.SelectorFromSet(rcNode.Spec.Selector)
+    query := labels.SelectorFromSet(selector)
     for _, n := range g.(graph.Graph).Nodes() {
        switch target := n.(type) {
        case *kubegraph.PodNode:
-           if target.Namespace != rcNode.Namespace {
+           if target.Namespace != namespace {
                continue
            }
            if query.Matches(labels.Set(target.Labels)) {
-               g.AddEdge(target, rcNode, ManagedByRCEdgeKind)
+               g.AddEdge(target, to, ManagedByControllerEdgeKind)
            }
        }
     }
 }

-// AddAllManagedByRCPodEdges calls AddManagedByRCPodEdges for every ServiceNode in the graph
-func AddAllManagedByRCPodEdges(g osgraph.MutableUniqueGraph) {
+// AddAllManagedByControllerPodEdges calls AddManagedByControllerPodEdges for every node in the graph
+// TODO: should do this through an interface (selects pods)
+func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) {
     for _, node := range g.(graph.Graph).Nodes() {
-       if rcNode, ok := node.(*kubegraph.ReplicationControllerNode); ok {
-           AddManagedByRCPodEdges(g, rcNode)
+       switch cast := node.(type) {
+       case *kubegraph.ReplicationControllerNode:
+           AddManagedByControllerPodEdges(g, cast, cast.ReplicationController.Namespace, cast.ReplicationController.Spec.Selector)
+       case *kubegraph.PetSetNode:
+           // TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments)
+           AddManagedByControllerPodEdges(g, cast, cast.PetSet.Namespace, cast.PetSet.Spec.Selector.MatchLabels)
        }
     }
 }
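
Note that the PetSet case hands only Spec.Selector.MatchLabels to the matcher, which is exactly what the TODO about expanded selectors flags: any MatchExpressions clause is silently ignored. A small standalone illustration of that gap (the selector and pod labels are invented; unversioned.LabelSelector is the same type the PetSet spec uses, as in the test above):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// A PetSet-style selector with both MatchLabels and a MatchExpressions clause.
	sel := &unversioned.LabelSelector{
		MatchLabels: map[string]string{"app": "db"},
		MatchExpressions: []unversioned.LabelSelectorRequirement{
			{Key: "tier", Operator: unversioned.LabelSelectorOpIn, Values: []string{"backend"}},
		},
	}

	pod := labels.Set{"app": "db", "tier": "frontend"}

	// AddManagedByControllerPodEdges above only sees sel.MatchLabels, so this
	// pod would still get a ManagedByController edge even though the full
	// selector (with MatchExpressions) excludes it.
	fmt.Println("MatchLabels-only match:", labels.SelectorFromSet(sel.MatchLabels).Matches(pod))
}

Until expanded selectors are handled, a pod excluded by MatchExpressions can still show up as managed in the graph.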

pkg/api/kubegraph/nodes/nodes.go

+31

@@ -4,6 +4,7 @@ import (
     "github.com/gonum/graph"

     kapi "k8s.io/kubernetes/pkg/api"
+    kapps "k8s.io/kubernetes/pkg/apis/apps"
     "k8s.io/kubernetes/pkg/apis/autoscaling"

     osgraph "github.com/openshift/origin/pkg/api/graph"
@@ -154,3 +155,33 @@ func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autosc
        },
     ).(*HorizontalPodAutoscalerNode)
 }
+
+func EnsurePetSetNode(g osgraph.MutableUniqueGraph, petset *kapps.PetSet) *PetSetNode {
+    nodeName := PetSetNodeName(petset)
+    node := osgraph.EnsureUnique(g,
+        nodeName,
+        func(node osgraph.Node) graph.Node {
+            return &PetSetNode{node, petset}
+        },
+    ).(*PetSetNode)
+
+    specNode := EnsurePetSetSpecNode(g, &petset.Spec, petset.Namespace, nodeName)
+    g.AddEdge(node, specNode, osgraph.ContainsEdgeKind)
+
+    return node
+}
+
+func EnsurePetSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.PetSetSpec, namespace string, ownerName osgraph.UniqueName) *PetSetSpecNode {
+    specName := PetSetSpecNodeName(spec, ownerName)
+    specNode := osgraph.EnsureUnique(g,
+        specName,
+        func(node osgraph.Node) graph.Node {
+            return &PetSetSpecNode{node, spec, namespace, ownerName}
+        },
+    ).(*PetSetSpecNode)
+
+    ptSpecNode := EnsurePodTemplateSpecNode(g, &spec.Template, namespace, specName)
+    g.AddEdge(specNode, ptSpecNode, osgraph.ContainsEdgeKind)
+
+    return specNode
+}
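
EnsurePetSetNode and EnsurePetSetSpecNode refer to PetSetNode, PetSetSpecNode, and their *NodeName helpers, which are defined in node-type files that belong to this commit but aren't shown in this excerpt. A plausible shape for the main node type, inferred from the &PetSetNode{node, petset} literal above and from edges.go reading cast.PetSet.Namespace; the kind string and the name format here are assumptions:

package nodes

import (
	kapps "k8s.io/kubernetes/pkg/apis/apps"

	osgraph "github.com/openshift/origin/pkg/api/graph"
)

// PetSetNodeKind is assumed to follow the existing "<Kind>" naming convention.
const PetSetNodeKind = "PetSet"

// PetSetNode wraps a PetSet in the graph; the field layout matches the
// &PetSetNode{node, petset} literal used in EnsurePetSetNode above.
type PetSetNode struct {
	osgraph.Node
	PetSet *kapps.PetSet
}

// PetSetNodeName is sketched here as a kind|namespace/name key; the real
// helper probably reuses the package's shared unique-name function.
func PetSetNodeName(o *kapps.PetSet) osgraph.UniqueName {
	return osgraph.UniqueName(PetSetNodeKind + "|" + o.Namespace + "/" + o.Name)
}

func (n PetSetNode) Object() interface{} { return n.PetSet }
func (n PetSetNode) Kind() string        { return PetSetNodeKind }
func (n PetSetNode) String() string      { return string(PetSetNodeName(n.PetSet)) }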
