Commit 6858e4f

Authored by OpenShift Bot
Merge pull request #14033 from deads2k/auth-13-client-builder
Merged by openshift-bot
2 parents 2d26eae + ba8eba7 commit 6858e4f

10 files changed: +207 −153 lines changed
+37
@@ -0,0 +1,37 @@
+package bootstrappolicy
+
+import (
+	"github.com/golang/glog"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
+)
+
+var (
+	deadClusterRoles = []authorizationapi.ClusterRole{}
+)
+
+func addDeadClusterRole(name string) {
+	for _, existingRole := range deadClusterRoles {
+		if name == existingRole.Name {
+			glog.Fatalf("role %q was already registered", name)
+		}
+	}
+
+	deadClusterRoles = append(deadClusterRoles,
+		authorizationapi.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{Name: name},
+		},
+	)
+}
+
+// GetDeadClusterRoles returns cluster roles which should no longer have any permissions.
+// These are enumerated so that a reconcile that tightens permissions will properly remove them.
+func GetDeadClusterRoles() []authorizationapi.ClusterRole {
+	return deadClusterRoles
+}
+
+func init() {
+	addDeadClusterRole("system:replication-controller")
+}

pkg/cmd/server/bootstrappolicy/infra_sa_policy.go

-48
@@ -26,9 +26,6 @@ const (
 	InfraBuildControllerServiceAccountName = "build-controller"
 	BuildControllerRoleName                = "system:build-controller"
 
-	InfraReplicationControllerServiceAccountName = "replication-controller"
-	ReplicationControllerRoleName                = "system:replication-controller"
-
 	InfraReplicaSetControllerServiceAccountName = "replicaset-controller"
 	ReplicaSetControllerRoleName                = "system:replicaset-controller"
 
@@ -290,51 +287,6 @@ func init() {
 		panic(err)
 	}
 
-	err = InfraSAs.addServiceAccount(
-		InfraReplicationControllerServiceAccountName,
-		authorizationapi.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: ReplicationControllerRoleName,
-			},
-			Rules: []authorizationapi.PolicyRule{
-				// ReplicationManager.rcController.ListWatch
-				{
-					Verbs:     sets.NewString("list", "watch"),
-					Resources: sets.NewString("replicationcontrollers"),
-				},
-				// ReplicationManager.syncReplicationController() -> updateReplicaCount()
-				{
-					// TODO: audit/remove those, 1.0 controllers needed get, update
-					Verbs:     sets.NewString("get", "update"),
-					Resources: sets.NewString("replicationcontrollers"),
-				},
-				// ReplicationManager.syncReplicationController() -> updateReplicaCount()
-				{
-					Verbs:     sets.NewString("update"),
-					Resources: sets.NewString("replicationcontrollers/status"),
-				},
-				// ReplicationManager.podController.ListWatch
-				{
-					Verbs:     sets.NewString("list", "watch"),
-					Resources: sets.NewString("pods"),
-				},
-				// ReplicationManager.podControl (RealPodControl)
-				{
-					Verbs:     sets.NewString("create", "delete", "patch"),
-					Resources: sets.NewString("pods"),
-				},
-				// ReplicationManager.podControl.recorder
-				{
-					Verbs:     sets.NewString("create", "update", "patch"),
-					Resources: sets.NewString("events"),
-				},
-			},
-		},
-	)
-	if err != nil {
-		panic(err)
-	}
-
 	err = InfraSAs.addServiceAccount(
 		InfraReplicaSetControllerServiceAccountName,
 		authorizationapi.ClusterRole{

pkg/cmd/server/bootstrappolicy/policy.go

+3
@@ -941,6 +941,9 @@ func GetOpenshiftBootstrapClusterRoles() []authorizationapi.ClusterRole {
 
 func GetBootstrapClusterRoles() []authorizationapi.ClusterRole {
 	openshiftClusterRoles := GetOpenshiftBootstrapClusterRoles()
+	// dead cluster roles need to be checked for conflicts (in case something new comes up)
+	// so add them to this list.
+	openshiftClusterRoles = append(openshiftClusterRoles, GetDeadClusterRoles()...)
 	openshiftSAClusterRoles := InfraSAs.AllRoles()
 	kubeClusterRoles, err := GetKubeBootstrapClusterRoles()
 	// coder error
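
The two hunks above register system:replication-controller as a "dead" role with an empty rule set and feed it back into GetBootstrapClusterRoles(), so a policy reconcile that overwrites live rules with the bootstrap definitions strips the role's permissions instead of leaving them behind. The sketch below only illustrates that idea; Role and reconcile are hypothetical stand-ins, not the Origin reconciler or its types.

package main

import "fmt"

// Role is a hypothetical, stripped-down stand-in for authorizationapi.ClusterRole.
type Role struct {
	Name  string
	Rules []string
}

// reconcile overwrites the rules of every live role that also appears in the
// bootstrap list. Dead roles ship in the bootstrap list with no rules, so
// their live counterparts are tightened down to nothing.
func reconcile(live map[string]*Role, bootstrap []Role) {
	for _, want := range bootstrap {
		if got, ok := live[want.Name]; ok {
			got.Rules = want.Rules
		}
	}
}

func main() {
	live := map[string]*Role{
		"system:replication-controller": {
			Name:  "system:replication-controller",
			Rules: []string{"list/watch replicationcontrollers", "create/delete pods"},
		},
	}
	// Mirrors GetBootstrapClusterRoles() appending GetDeadClusterRoles():
	// the dead role is present in the bootstrap list, but carries no rules.
	bootstrap := []Role{{Name: "system:replication-controller"}}

	reconcile(live, bootstrap)
	fmt.Println(len(live["system:replication-controller"].Rules), "rules remain") // prints: 0 rules remain
}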

pkg/cmd/server/kubernetes/master/master.go

-12
@@ -43,7 +43,6 @@ import (
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
 	gccontroller "k8s.io/kubernetes/pkg/controller/podgc"
 	replicasetcontroller "k8s.io/kubernetes/pkg/controller/replicaset"
-	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
 	statefulsetcontroller "k8s.io/kubernetes/pkg/controller/statefulset"
 	attachdetachcontroller "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
@@ -207,17 +206,6 @@ func (c *MasterConfig) RunReplicaSetController(client kclientset.Interface) {
 	go controller.Run(int(c.ControllerManager.ConcurrentRSSyncs), utilwait.NeverStop)
 }
 
-// RunReplicationController starts the Kubernetes replication controller sync loop
-func (c *MasterConfig) RunReplicationController(client kclientset.Interface) {
-	controllerManager := replicationcontroller.NewReplicationManager(
-		c.Informers.KubernetesInformers().Core().V1().Pods(),
-		c.Informers.KubernetesInformers().Core().V1().ReplicationControllers(),
-		client,
-		replicationcontroller.BurstReplicas,
-	)
-	go controllerManager.Run(int(c.ControllerManager.ConcurrentRCSyncs), utilwait.NeverStop)
-}
-
 func (c *MasterConfig) RunDeploymentController(client kclientset.Interface) {
 	controller := deployment.NewDeploymentController(
 		c.Informers.KubernetesInformers().Extensions().V1beta1().Deployments(),

pkg/cmd/server/start/start_master.go

+74 -5
@@ -17,8 +17,11 @@ import (
 
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
 	utilwait "k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/dynamic"
+	restclient "k8s.io/client-go/rest"
+	kctrlmgr "k8s.io/kubernetes/cmd/kube-controller-manager/app"
 	cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 	kapi "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/apps"
@@ -27,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/capabilities"
+	"k8s.io/kubernetes/pkg/controller"
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 
@@ -602,10 +606,6 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
 	oc.RunSecurityAllocationController()
 
 	if kc != nil {
-		_, _, _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName)
-		if err != nil {
-			glog.Fatalf("Could not get client for replication controller: %v", err)
-		}
 		_, _, _, rsClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicaSetControllerServiceAccountName)
 		if err != nil {
 			glog.Fatalf("Could not get client for replication controller: %v", err)
@@ -687,10 +687,79 @@ func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
 			glog.Fatalf("Could not get client for garbage collector controller: %v", err)
 		}
 
+		rootClientBuilder := controller.SimpleControllerClientBuilder{
+			ClientConfig: &oc.PrivilegedLoopbackClientConfig,
+		}
+		saClientBuilder := controller.SAControllerClientBuilder{
+			ClientConfig:         restclient.AnonymousClientConfig(&oc.PrivilegedLoopbackClientConfig),
+			CoreClient:           oc.PrivilegedLoopbackKubernetesClientsetExternal.Core(),
+			AuthenticationClient: oc.PrivilegedLoopbackKubernetesClientsetExternal.Authentication(),
+			Namespace:            "kube-system",
+		}
+		availableResources, err := kctrlmgr.GetAvailableResources(rootClientBuilder)
+		if err != nil {
+			return err
+		}
+
+		controllerContext := kctrlmgr.ControllerContext{
+			ClientBuilder:      saClientBuilder,
+			InformerFactory:    oc.Informers.KubernetesInformers(),
+			Options:            *controllerManagerOptions,
+			AvailableResources: availableResources,
+			Stop:               utilwait.NeverStop,
+		}
+		controllerInitializers := kctrlmgr.NewControllerInitializers()
+
+		// TODO remove this. Using it now to control the migration
+		allowedControllers := sets.NewString(
+			// "endpoint",
+			"replicationcontroller",
+			// "podgc",
+			// "resourcequota",
+			// "namespace",
+			// "serviceaccount",
+			// "garbagecollector",
+			// "daemonset",
+			// "job",
+			// "deployment",
+			// "replicaset",
+			// "horizontalpodautoscaling",
+			// "disruption",
+			// "statefuleset",
+			// "cronjob",
+			// "certificatesigningrequests",
+			// "ttl",
+			// "bootstrapsigner",
+			// "tokencleaner",
+		)
+
+		for controllerName, initFn := range controllerInitializers {
+			// TODO remove this. Only call one to start to prove the principle
+			if !allowedControllers.Has(controllerName) {
+				glog.Warningf("%q is skipped", controllerName)
+				continue
+			}
+			if !controllerContext.IsControllerEnabled(controllerName) {
+				glog.Warningf("%q is disabled", controllerName)
+				continue
+			}
+
+			glog.V(1).Infof("Starting %q", controllerName)
+			started, err := initFn(controllerContext)
+			if err != nil {
+				glog.Errorf("Error starting %q", controllerName)
+				return err
+			}
+			if !started {
+				glog.Warningf("Skipping %q", controllerName)
+				continue
+			}
+			glog.Infof("Started %q", controllerName)
+		}
+
 		// no special order
 		kc.RunNodeController()
 		kc.RunScheduler()
-		kc.RunReplicationController(rcClient)
 		kc.RunReplicaSetController(rsClient)
 		kc.RunDeploymentController(deploymentClient)
 		kc.RunGarbageCollectorController(garbageCollectorControllerClient, garbageCollectorControllerConfig)
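
The new block hands controller startup to the upstream kube-controller-manager initializers: a service-account client builder gives each controller its own credentials, and the temporary allowedControllers set limits the migration to the replication controller for now. The sketch below shows only the shape of that loop; ClientBuilder, Context, and InitFunc are simplified stand-ins, not the real controller or kctrlmgr signatures.

package main

import (
	"fmt"
	"log"
)

// ClientBuilder is a simplified stand-in for a controller client builder: it
// hands each controller a client bound to that controller's service account.
type ClientBuilder interface {
	ClientOrDie(saName string) string // a real builder returns a clientset, not a string
}

type saClientBuilder struct{ namespace string }

func (b saClientBuilder) ClientOrDie(saName string) string {
	return fmt.Sprintf("client for %s/%s", b.namespace, saName)
}

// Context is a simplified stand-in for the context passed to each initializer.
type Context struct {
	ClientBuilder ClientBuilder
	Stop          <-chan struct{}
}

// InitFunc mirrors the initializer shape used above: it reports whether the
// controller was started and any error encountered.
type InitFunc func(ctx Context) (started bool, err error)

func main() {
	ctx := Context{ClientBuilder: saClientBuilder{namespace: "kube-system"}, Stop: make(chan struct{})}

	initializers := map[string]InitFunc{
		"replicationcontroller": func(ctx Context) (bool, error) {
			fmt.Println("starting with", ctx.ClientBuilder.ClientOrDie("replication-controller"))
			return true, nil
		},
		"deployment": func(ctx Context) (bool, error) { return true, nil },
	}

	// Temporary migration gate, mirroring allowedControllers in the diff.
	allowed := map[string]bool{"replicationcontroller": true}

	for name, initFn := range initializers {
		if !allowed[name] {
			log.Printf("%q is skipped", name)
			continue
		}
		started, err := initFn(ctx)
		if err != nil {
			log.Fatalf("error starting %q: %v", name, err)
		}
		if !started {
			log.Printf("skipping %q", name)
			continue
		}
		log.Printf("started %q", name)
	}
}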

test/cmd/quota.sh

+4 -1
@@ -29,7 +29,10 @@ os::cmd::expect_success 'oc new-project asmail [email protected]'
 os::cmd::try_until_text 'oc get appliedclusterresourcequota -n bar --as deads -o name' "for-deads-by-annotation"
 os::cmd::try_until_text 'oc get appliedclusterresourcequota -n foo --as deads -o name' "for-deads-by-annotation"
 os::cmd::try_until_text 'oc get appliedclusterresourcequota -n asmail --as [email protected] -o name' "for-deads-email-by-annotation"
-os::cmd::try_until_text 'oc describe appliedclusterresourcequota/for-deads-by-annotation -n bar --as deads' "secrets.*1[0-9]"
+# the point of the test is to make sure that clusterquota is counting correctly and that secrets are auto-created and countable
+# the create_dockercfg controller can issue multiple creates if the token controller doesn't fill them in, but the creates are duplicates
+# since an annotation tracks the intended secrets to be created. That results in multi-counting quota until reconciliation runs
+os::cmd::try_until_text 'oc describe appliedclusterresourcequota/for-deads-by-annotation -n bar --as deads' "secrets.*(1[0-9]|20|21|22)"
 os::cmd::expect_success 'oc delete project foo'
 os::cmd::try_until_not_text 'oc get clusterresourcequota/for-deads-by-annotation -o jsonpath="{.status.namespaces[*].namespace}"' 'foo'
 os::cmd::expect_success 'oc delete project bar'
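
The widened pattern secrets.*(1[0-9]|20|21|22) accepts any secret count from 10 through 22, which absorbs the duplicate dockercfg-secret creates described in the new comments. A standalone sketch (not part of the test suite) that shows which counts the pattern admits:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern the test passes to os::cmd::try_until_text.
	re := regexp.MustCompile(`secrets.*(1[0-9]|20|21|22)`)
	for n := 8; n <= 23; n++ {
		sample := fmt.Sprintf("secrets    %d", n)
		// Counts 10 through 22 match; 8, 9, and 23 do not.
		fmt.Printf("%-14s matches: %v\n", sample, re.MatchString(sample))
	}
}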

test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml

+6 -57
@@ -2781,6 +2781,12 @@ items:
     - get
     - put
     - update
+- apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    creationTimestamp: null
+    name: system:replication-controller
+  rules: []
 - apiVersion: v1
   kind: ClusterRole
   metadata:
@@ -3725,63 +3731,6 @@ items:
     - create
     - patch
     - update
-- apiVersion: v1
-  kind: ClusterRole
-  metadata:
-    annotations:
-      authorization.openshift.io/system-only: "true"
-    creationTimestamp: null
-    name: system:replication-controller
-  rules:
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - replicationcontrollers
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - replicationcontrollers
-    verbs:
-    - get
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - replicationcontrollers/status
-    verbs:
-    - update
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - list
-    - watch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - pods
-    verbs:
-    - create
-    - delete
-    - patch
-  - apiGroups:
-    - ""
-    attributeRestrictions: null
-    resources:
-    - events
-    verbs:
-    - create
-    - patch
-    - update
 - apiVersion: v1
   kind: ClusterRole
   metadata:

vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go

+5-5
Some generated files are not rendered by default.
