From 210a137044ee9fceb7a7527a3a690e3f9ad1cb42 Mon Sep 17 00:00:00 2001 From: Steve Kuznetsov Date: Thu, 3 Aug 2023 08:30:31 -0600 Subject: [PATCH 1/2] operators/catalog: don't watch copied CSVs As far as I can tell, we never do anything with them. Signed-off-by: Steve Kuznetsov --- pkg/controller/operators/catalog/operator.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/controller/operators/catalog/operator.go b/pkg/controller/operators/catalog/operator.go index 0d24b363d3..0daeddf4b5 100644 --- a/pkg/controller/operators/catalog/operator.go +++ b/pkg/controller/operators/catalog/operator.go @@ -204,7 +204,9 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo // Fields are pruned from local copies of the objects managed // by this informer in order to reduce cached size. prunedCSVInformer := cache.NewSharedIndexInformer( - pruning.NewListerWatcher(op.client, metav1.NamespaceAll, func(*metav1.ListOptions) {}, pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { + pruning.NewListerWatcher(op.client, metav1.NamespaceAll, func(options *metav1.ListOptions) { + options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) + }, pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { *csv = v1alpha1.ClusterServiceVersion{ TypeMeta: csv.TypeMeta, ObjectMeta: metav1.ObjectMeta{ Name: csv.Name, Namespace: csv.Namespace, Labels: csv.Labels, Annotations: csv.Annotations, }, Spec: v1alpha1.ClusterServiceVersionSpec{ CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, APIServiceDefinitions: csv.Spec.APIServiceDefinitions, Replaces: csv.Spec.Replaces, Version: csv.Spec.Version, }, Status: v1alpha1.ClusterServiceVersionStatus{ Phase: csv.Status.Phase, Reason: csv.Status.Reason, }, } })), From fc29ea19438415a05fed612b2b1b2aa73e0a4cfa Mon Sep 17 00:00:00 2001 From: Steve Kuznetsov Date: Thu, 3 Aug 2023 08:35:08 -0600 Subject: [PATCH 2/2] operators/olm: use a partial object metadata watch for copied CSVs All we ever need to know about copied CSVs is their metadata. No need to prune objects in memory, it's better to never allocate the memory to deserialize them in the first place. 
Signed-off-by: Steve Kuznetsov --- cmd/olm/main.go | 6 + pkg/controller/operators/catalog/operator.go | 48 ++- pkg/controller/operators/olm/config.go | 8 + pkg/controller/operators/olm/operator.go | 68 +-- pkg/controller/operators/olm/operator_test.go | 330 +++++++++----- pkg/controller/operators/olm/operatorgroup.go | 31 +- .../operators/olm/operatorgroup_test.go | 53 +-- .../k8s.io/client-go/metadata/fake/simple.go | 405 ++++++++++++++++++ .../metadata/metadatainformer/informer.go | 183 ++++++++ .../metadata/metadatainformer/interface.go | 53 +++ .../metadata/metadatalister/interface.go | 40 ++ .../metadata/metadatalister/lister.go | 91 ++++ .../client-go/metadata/metadatalister/shim.go | 87 ++++ vendor/modules.txt | 3 + 14 files changed, 1190 insertions(+), 216 deletions(-) create mode 100644 vendor/k8s.io/client-go/metadata/fake/simple.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatainformer/informer.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatainformer/interface.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatalister/interface.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatalister/lister.go create mode 100644 vendor/k8s.io/client-go/metadata/metadatalister/shim.go diff --git a/cmd/olm/main.go b/cmd/olm/main.go index fd063ffe56..c0b6868f2b 100644 --- a/cmd/olm/main.go +++ b/cmd/olm/main.go @@ -14,6 +14,7 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/metadata" "k8s.io/klog" ctrl "sigs.k8s.io/controller-runtime" @@ -154,6 +155,10 @@ func main() { if err != nil { logger.WithError(err).Fatal("error configuring custom resource client") } + metadataClient, err := metadata.NewForConfig(config) + if err != nil { + logger.WithError(err).Fatal("error configuring metadata client") + } // Create a new instance of the operator. 
op, err := olm.NewOperator( @@ -162,6 +167,7 @@ func main() { olm.WithWatchedNamespaces(namespaces...), olm.WithResyncPeriod(queueinformer.ResyncWithJitter(*wakeupInterval, 0.2)), olm.WithExternalClient(crClient), + olm.WithMetadataClient(metadataClient), olm.WithOperatorClient(opClient), olm.WithRestConfig(config), olm.WithConfigClient(versionedConfigClient), diff --git a/pkg/controller/operators/catalog/operator.go b/pkg/controller/operators/catalog/operator.go index 0daeddf4b5..aa7eb9668b 100644 --- a/pkg/controller/operators/catalog/operator.go +++ b/pkg/controller/operators/catalog/operator.go @@ -204,29 +204,31 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo // Fields are pruned from local copies of the objects managed // by this informer in order to reduce cached size. prunedCSVInformer := cache.NewSharedIndexInformer( - pruning.NewListerWatcher(op.client, metav1.NamespaceAll, func(options *metav1.ListOptions) { - options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) - }, pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { - *csv = v1alpha1.ClusterServiceVersion{ - TypeMeta: csv.TypeMeta, - ObjectMeta: metav1.ObjectMeta{ - Name: csv.Name, - Namespace: csv.Namespace, - Labels: csv.Labels, - Annotations: csv.Annotations, - }, - Spec: v1alpha1.ClusterServiceVersionSpec{ - CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, - APIServiceDefinitions: csv.Spec.APIServiceDefinitions, - Replaces: csv.Spec.Replaces, - Version: csv.Spec.Version, - }, - Status: v1alpha1.ClusterServiceVersionStatus{ - Phase: csv.Status.Phase, - Reason: csv.Status.Reason, - }, - } - })), + pruning.NewListerWatcher(op.client, metav1.NamespaceAll, + func(options *metav1.ListOptions) { + options.LabelSelector = fmt.Sprintf("!%s", v1alpha1.CopiedLabelKey) + }, + pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { + *csv = v1alpha1.ClusterServiceVersion{ + TypeMeta: csv.TypeMeta, + ObjectMeta: metav1.ObjectMeta{ 
+ Name: csv.Name, + Namespace: csv.Namespace, + Labels: csv.Labels, + Annotations: csv.Annotations, + }, + Spec: v1alpha1.ClusterServiceVersionSpec{ + CustomResourceDefinitions: csv.Spec.CustomResourceDefinitions, + APIServiceDefinitions: csv.Spec.APIServiceDefinitions, + Replaces: csv.Spec.Replaces, + Version: csv.Spec.Version, + }, + Status: v1alpha1.ClusterServiceVersionStatus{ + Phase: csv.Status.Phase, + Reason: csv.Status.Reason, + }, + } + })), &v1alpha1.ClusterServiceVersion{}, resyncPeriod(), cache.Indexers{ diff --git a/pkg/controller/operators/olm/config.go b/pkg/controller/operators/olm/config.go index edb3df7175..fa472c7130 100644 --- a/pkg/controller/operators/olm/config.go +++ b/pkg/controller/operators/olm/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer" + "k8s.io/client-go/metadata" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -29,6 +30,7 @@ type operatorConfig struct { clock utilclock.Clock logger *logrus.Logger operatorClient operatorclient.ClientInterface + metadataClient metadata.Interface externalClient versioned.Interface strategyResolver install.StrategyResolverInterface apiReconciler APIIntersectionReconciler @@ -159,6 +161,12 @@ func WithOperatorClient(operatorClient operatorclient.ClientInterface) OperatorO } } +func WithMetadataClient(metadataClient metadata.Interface) OperatorOption { + return func(config *operatorConfig) { + config.metadataClient = metadataClient + } +} + func WithExternalClient(externalClient versioned.Interface) OperatorOption { return func(config *operatorConfig) { config.externalClient = externalClient diff --git a/pkg/controller/operators/olm/operator.go b/pkg/controller/operators/olm/operator.go index efd5e33b37..16dde0f8bc 100644 --- a/pkg/controller/operators/olm/operator.go +++ b/pkg/controller/operators/olm/operator.go @@ -24,6 +24,8 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/informers" 
k8sscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata/metadatainformer" + "k8s.io/client-go/metadata/metadatalister" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" @@ -35,12 +37,10 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" - operatorsv1alpha1listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/certs" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" - "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/pruning" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/overrides" - resolver "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clients" csvutility "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/csv" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" @@ -75,7 +75,7 @@ type Operator struct { client versioned.Interface lister operatorlister.OperatorLister protectedCopiedCSVNamespaces map[string]struct{} - copiedCSVLister operatorsv1alpha1listers.ClusterServiceVersionLister + copiedCSVLister metadatalister.Lister ogQueueSet *queueinformer.ResourceQueueSet csvQueueSet *queueinformer.ResourceQueueSet olmConfigQueue workqueue.RateLimitingInterface @@ -127,6 +127,9 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat if err := k8sscheme.AddToScheme(scheme); err != nil { return 
nil, err } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return nil, err + } op := &Operator{ Operator: queueOperator, @@ -208,44 +211,20 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat return nil, err } - // A separate informer solely for CSV copies. Fields - // are pruned from local copies of the objects managed + // A separate informer solely for CSV copies. Object metadata requests are used // by this informer in order to reduce cached size. - copiedCSVInformer := cache.NewSharedIndexInformer( - pruning.NewListerWatcher( - op.client, - namespace, - func(opts *metav1.ListOptions) { - opts.LabelSelector = v1alpha1.CopiedLabelKey - }, - pruning.PrunerFunc(func(csv *v1alpha1.ClusterServiceVersion) { - nonstatus, status := copyableCSVHash(csv) - *csv = v1alpha1.ClusterServiceVersion{ - TypeMeta: csv.TypeMeta, - ObjectMeta: csv.ObjectMeta, - Status: v1alpha1.ClusterServiceVersionStatus{ - Phase: csv.Status.Phase, - Reason: csv.Status.Reason, - }, - } - if csv.Annotations == nil { - csv.Annotations = make(map[string]string, 2) - } - // These annotation keys are - // intentionally invalid -- all writes - // to copied CSVs are regenerated from - // the corresponding non-copied CSV, - // so it should never be transmitted - // back to the API server. 
- csv.Annotations["$copyhash-nonstatus"] = nonstatus - csv.Annotations["$copyhash-status"] = status - }), - ), - &v1alpha1.ClusterServiceVersion{}, + gvr := v1alpha1.SchemeGroupVersion.WithResource("clusterserviceversions") + copiedCSVInformer := metadatainformer.NewFilteredMetadataInformer( + config.metadataClient, + gvr, + namespace, config.resyncPeriod(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - op.copiedCSVLister = operatorsv1alpha1listers.NewClusterServiceVersionLister(copiedCSVInformer.GetIndexer()) + func(options *metav1.ListOptions) { + options.LabelSelector = v1alpha1.CopiedLabelKey + }, + ).Informer() + op.copiedCSVLister = metadatalister.New(copiedCSVInformer.GetIndexer(), gvr) // Register separate queue for gcing copied csvs copiedCSVGCQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s/csv-gc", namespace)) @@ -1195,17 +1174,16 @@ func (a *Operator) handleClusterServiceVersionDeletion(obj interface{}) { } } -func (a *Operator) removeDanglingChildCSVs(csv *v1alpha1.ClusterServiceVersion) error { +func (a *Operator) removeDanglingChildCSVs(csv *metav1.PartialObjectMetadata) error { logger := a.logger.WithFields(logrus.Fields{ "id": queueinformer.NewLoopID(), "csv": csv.GetName(), "namespace": csv.GetNamespace(), - "phase": csv.Status.Phase, "labels": csv.GetLabels(), "annotations": csv.GetAnnotations(), }) - if !csv.IsCopied() { + if !v1alpha1.IsCopied(csv) { logger.Warning("removeDanglingChild called on a parent. 
this is a no-op but should be avoided.") return nil } @@ -1244,7 +1222,7 @@ func (a *Operator) removeDanglingChildCSVs(csv *v1alpha1.ClusterServiceVersion) return nil } -func (a *Operator) deleteChild(csv *v1alpha1.ClusterServiceVersion, logger *logrus.Entry) error { +func (a *Operator) deleteChild(csv *metav1.PartialObjectMetadata, logger *logrus.Entry) error { logger.Debug("gcing csv") return a.client.OperatorsV1alpha1().ClusterServiceVersions(csv.GetNamespace()).Delete(context.TODO(), csv.GetName(), metav1.DeleteOptions{}) } @@ -1683,12 +1661,12 @@ func (a *Operator) createCSVCopyingDisabledEvent(csv *v1alpha1.ClusterServiceVer } func (a *Operator) syncGcCsv(obj interface{}) (syncError error) { - clusterServiceVersion, ok := obj.(*v1alpha1.ClusterServiceVersion) + clusterServiceVersion, ok := obj.(*metav1.PartialObjectMetadata) if !ok { a.logger.Debugf("wrong type: %#v", obj) return fmt.Errorf("casting ClusterServiceVersion failed") } - if clusterServiceVersion.IsCopied() { + if v1alpha1.IsCopied(clusterServiceVersion) { syncError = a.removeDanglingChildCSVs(clusterServiceVersion) return } diff --git a/pkg/controller/operators/olm/operator_test.go b/pkg/controller/operators/olm/operator_test.go index 5cac02b6cf..86e331c2aa 100644 --- a/pkg/controller/operators/olm/operator_test.go +++ b/pkg/controller/operators/olm/operator_test.go @@ -29,7 +29,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" apierrors "k8s.io/apimachinery/pkg/api/errors" - meta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -39,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" k8sfake "k8s.io/client-go/kubernetes/fake" k8sscheme "k8s.io/client-go/kubernetes/scheme" + metadatafake "k8s.io/client-go/metadata/fake" 
"k8s.io/client-go/pkg/version" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -167,6 +168,7 @@ type fakeOperatorConfig struct { k8sObjs []runtime.Object extObjs []runtime.Object regObjs []runtime.Object + partialMetadata []runtime.Object actionLog *[]clienttesting.Action } @@ -231,6 +233,12 @@ func withRegObjs(regObjs ...runtime.Object) fakeOperatorOption { } } +func withPartialMetadata(objects ...runtime.Object) fakeOperatorOption { + return func(config *fakeOperatorConfig) { + config.partialMetadata = objects + } +} + func withActionLog(log *[]clienttesting.Action) fakeOperatorOption { return func(config *fakeOperatorConfig) { config.actionLog = log @@ -245,6 +253,7 @@ func withLogger(logger *logrus.Logger) fakeOperatorOption { // NewFakeOperator creates and starts a new operator using fake clients. func NewFakeOperator(ctx context.Context, options ...fakeOperatorOption) (*Operator, error) { + logrus.SetLevel(logrus.DebugLevel) // Apply options to default config config := &fakeOperatorConfig{ operatorConfig: &operatorConfig{ @@ -267,8 +276,20 @@ func NewFakeOperator(ctx context.Context, options ...fakeOperatorOption) (*Opera option(config) } + scheme := runtime.NewScheme() + if err := k8sscheme.AddToScheme(scheme); err != nil { + return nil, err + } + if err := metav1.AddMetaToScheme(scheme); err != nil { + return nil, err + } + if err := fake.AddToScheme(scheme); err != nil { + return nil, err + } + // Create client fakes - config.externalClient = fake.NewReactionForwardingClientsetDecorator(config.clientObjs, config.fakeClientOptions...) + externalFake := fake.NewReactionForwardingClientsetDecorator(config.clientObjs, config.fakeClientOptions...) + config.externalClient = externalFake // TODO: Using the ReactionForwardingClientsetDecorator for k8s objects causes issues with adding Resources for discovery. // For now, directly use a SimpleClientset instead. k8sClientFake := k8sfake.NewSimpleClientset(config.k8sObjs...) 
@@ -279,6 +300,27 @@ func NewFakeOperator(ctx context.Context, options ...fakeOperatorOption) (*Opera })) config.operatorClient = operatorclient.NewClient(k8sClientFake, apiextensionsfake.NewSimpleClientset(config.extObjs...), apiregistrationfake.NewSimpleClientset(config.regObjs...)) config.configClient = configfake.NewSimpleClientset() + metadataFake := metadatafake.NewSimpleMetadataClient(scheme, config.partialMetadata...) + config.metadataClient = metadataFake + // It's a travesty that we need to do this, but the fakes leave us no other option. In the API server, of course + // changes to objects are transparently exposed in the metadata client. In fake-land, we need to enforce that ourselves. + externalFake.PrependReactor("*", "*", func(action clienttesting.Action) (bool, runtime.Object, error) { + var err error + switch action.GetVerb() { + case "create": + a := action.(clienttesting.CreateAction) + m := a.GetObject().(metav1.ObjectMetaAccessor).GetObjectMeta().(*metav1.ObjectMeta) + _, err = metadataFake.Resource(action.GetResource()).Namespace(action.GetNamespace()).(metadatafake.MetadataClient).CreateFake(&metav1.PartialObjectMetadata{ObjectMeta: *m}, metav1.CreateOptions{}) + case "update": + a := action.(clienttesting.UpdateAction) + m := a.GetObject().(metav1.ObjectMetaAccessor).GetObjectMeta().(*metav1.ObjectMeta) + _, err = metadataFake.Resource(action.GetResource()).Namespace(action.GetNamespace()).(metadatafake.MetadataClient).UpdateFake(&metav1.PartialObjectMetadata{ObjectMeta: *m}, metav1.UpdateOptions{}) + case "delete": + a := action.(clienttesting.DeleteAction) + err = metadataFake.Resource(action.GetResource()).Delete(context.TODO(), a.GetName(), metav1.DeleteOptions{}) + } + return false, nil, err + }) for _, ns := range config.namespaces { _, err := config.operatorClient.KubernetesInterface().CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{}) @@ -294,11 +336,6 @@ 
func NewFakeOperator(ctx context.Context, options ...fakeOperatorOption) (*Opera } op.recorder = config.recorder - scheme := runtime.NewScheme() - if err := k8sscheme.AddToScheme(scheme); err != nil { - return nil, err - } - op.csvSetGenerator = csvutility.NewSetGenerator(config.logger, op.lister) op.csvReplaceFinder = csvutility.NewReplaceFinder(config.logger, config.externalClient) op.serviceAccountSyncer = scoped.NewUserDefinedServiceAccountSyncer(config.logger, scheme, config.operatorClient, op.client) @@ -930,7 +967,7 @@ func TestTransitionCSV(t *testing.T) { apiLabeler labeler.Labeler } type initial struct { - csvs []runtime.Object + csvs []*v1alpha1.ClusterServiceVersion clientObjs []runtime.Object crds []runtime.Object objs []runtime.Object @@ -950,7 +987,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVNoneToPending/CRD", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -972,7 +1009,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVNoneToPending/APIService/Required", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -994,7 +1031,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToFailed/BadStrategyPermissions", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithUID(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1046,7 +1083,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToPending/CRD", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1072,7 +1109,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToPending/APIService/Required/Missing", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ 
withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1097,7 +1134,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToPending/APIService/Required/Unavailable", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1123,7 +1160,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToPending/APIService/Required/Unknown", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1149,7 +1186,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToPending/APIService/Owned/DeploymentNotFound", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1177,7 +1214,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVPendingToFailed/CRDOwnerConflict", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1221,7 +1258,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVPendingToFailed/APIServiceOwnerConflict", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1305,7 +1342,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVFailedToPending/Deployment", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1330,7 +1367,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVFailedToPending/CRD", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1355,7 +1392,7 @@ func TestTransitionCSV(t *testing.T) { { 
name: "SingleCSVPendingToInstallReady/CRD", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1380,7 +1417,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVPendingToInstallReady/APIService/Required", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1403,7 +1440,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVInstallReadyToInstalling", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1428,7 +1465,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVInstallReadyToInstalling/APIService/Owned", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1453,7 +1490,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToPending/APIService/Owned/CertRotation", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1522,7 +1559,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/APIService/Owned/BadCAHash/Deployment", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1591,7 +1628,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/APIService/Owned/BadCAHash/Secret", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1660,7 +1697,7 @@ func TestTransitionCSV(t *testing.T) { { name: 
"SingleCSVSucceededToFailed/APIService/Owned/BadCAHash/DeploymentAndSecret", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1729,7 +1766,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/APIService/Owned/BadCA", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1798,7 +1835,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/APIService/Owned/BadServingCert", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1867,7 +1904,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/APIService/Owned/ExpiredCA", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -1936,7 +1973,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVFailedToPending/APIService/Owned/ExpiredCA", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2005,7 +2042,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVFailedToPending/InstallModes/Owned/PreviouslyUnsupported", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2035,7 +2072,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVFailedToPending/InstallModes/Owned/PreviouslyNoOperatorGroups", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", 
namespace, "0.0.0", @@ -2065,7 +2102,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVFailedToPending/InstallModes/Owned/PreviouslyTooManyOperatorGroups", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2095,7 +2132,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/InstallModes/Owned/Unsupported", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withInstallModes(withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2132,7 +2169,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/InstallModes/Owned/NoOperatorGroups", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2164,7 +2201,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/InstallModes/Owned/TooManyOperatorGroups", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2213,7 +2250,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToSucceeded/OperatorGroupChanged", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2258,7 +2295,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVInstallingToSucceeded/UnmanagedDeploymentNotAffected", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2293,7 +2330,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVInstallingToInstallReady", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", 
namespace, "0.0.0", @@ -2322,7 +2359,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVInstallingToInstallReadyDueToAnnotations", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2348,7 +2385,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToSucceeded/UnmanagedDeploymentInNamespace", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2387,7 +2424,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToFailed/CRD", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2409,7 +2446,7 @@ func TestTransitionCSV(t *testing.T) { { name: "SingleCSVSucceededToPending/DeploymentSpecChanged", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withConditionReason(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2444,7 +2481,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVSucceededToReplacing", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAnnotations(csv("csv1", namespace, "0.0.0", @@ -2453,7 +2490,7 @@ func TestTransitionCSV(t *testing.T) { []*apiextensionsv1.CustomResourceDefinition{crd("c1", "v1", "g1")}, []*apiextensionsv1.CustomResourceDefinition{}, v1alpha1.CSVPhaseSucceeded, - ), defaultTemplateAnnotations), + ), defaultTemplateAnnotations).(*v1alpha1.ClusterServiceVersion), csvWithAnnotations(csv("csv2", namespace, "0.0.0", @@ -2482,7 +2519,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVReplacingToDeleted", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2527,7 +2564,7 @@ func TestTransitionCSV(t *testing.T) { { name: 
"CSVDeletedToGone", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2573,7 +2610,7 @@ func TestTransitionCSV(t *testing.T) { name: "CSVMultipleReplacingToDeleted", initial: initial{ // order matters in this test case - we want to apply the latest CSV first to test the GC marking - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithLabels(csvWithAnnotations(csv("csv3", namespace, "0.0.0", @@ -2638,7 +2675,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVMultipleDeletedToGone", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv3", namespace, "0.0.0", @@ -2697,7 +2734,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVMultipleDeletedToGone/AfterOneDeleted", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv2", namespace, "0.0.0", @@ -2743,7 +2780,7 @@ func TestTransitionCSV(t *testing.T) { { name: "CSVMultipleDeletedToGone/AfterTwoDeleted", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv2", namespace, "0.0.0", @@ -2789,7 +2826,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVNoneToFailed/InterOperatorGroupOwnerConflict", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(APIConflict)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2812,7 +2849,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVNoneToNone/AddAPIs", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(AddAPIs)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2835,7 +2872,7 @@ func TestTransitionCSV(t *testing.T) { 
name: "SingleCSVNoneToNone/RemoveAPIs", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(RemoveAPIs)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2858,7 +2895,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVNoneToFailed/StaticOperatorGroup/AddAPIs", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(AddAPIs)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2888,7 +2925,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVNoneToFailed/StaticOperatorGroup/RemoveAPIs", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(RemoveAPIs)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2918,7 +2955,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVNoneToPending/StaticOperatorGroup/NoAPIConflict", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(NoAPIConflict)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -2948,7 +2985,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVFailedToPending/InterOperatorGroupOwnerConflict/NoAPIConflict", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(NoAPIConflict)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csvWithStatusReason(csv("csv1", namespace, "0.0.0", @@ -2971,7 +3008,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVFailedToPending/StaticOperatorGroup/CannotModifyStaticOperatorGroupProvidedAPIs/NoAPIConflict", config: operatorConfig{apiReconciler: 
buildFakeAPIIntersectionReconcilerThatReturns(NoAPIConflict)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csvWithStatusReason(csv("csv1", namespace, "0.0.0", @@ -3001,7 +3038,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVFailedToFailed/InterOperatorGroupOwnerConflict/APIConflict", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(APIConflict)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csvWithStatusReason(csv("csv1", namespace, "0.0.0", @@ -3024,7 +3061,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVFailedToFailed/StaticOperatorGroup/CannotModifyStaticOperatorGroupProvidedAPIs/AddAPIs", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(AddAPIs)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csvWithStatusReason(csv("csv1", namespace, "0.0.0", @@ -3054,7 +3091,7 @@ func TestTransitionCSV(t *testing.T) { name: "SingleCSVFailedToFailed/StaticOperatorGroup/CannotModifyStaticOperatorGroupProvidedAPIs/RemoveAPIs", config: operatorConfig{apiReconciler: buildFakeAPIIntersectionReconcilerThatReturns(RemoveAPIs)}, initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csvWithStatusReason(csv("csv1", namespace, "0.0.0", @@ -3086,13 +3123,22 @@ func TestTransitionCSV(t *testing.T) { // Create test operator ctx, cancel := context.WithCancel(context.TODO()) defer cancel() + clientObjects := tt.initial.clientObjs + var partials []runtime.Object + for _, csv := range tt.initial.csvs { + clientObjects = append(clientObjects, csv) + partials = append(partials, &metav1.PartialObjectMetadata{ + ObjectMeta: csv.ObjectMeta, + }) + } op, err := NewFakeOperator( ctx, withNamespaces(namespace, "kube-system"), - withClientObjs(append(tt.initial.csvs, 
tt.initial.clientObjs...)...), + withClientObjs(clientObjects...), withK8sObjs(tt.initial.objs...), withExtObjs(tt.initial.crds...), withRegObjs(tt.initial.apis...), + withPartialMetadata(partials...), withOperatorNamespace(namespace), withAPIReconciler(tt.config.apiReconciler), withAPILabeler(tt.config.apiLabeler), @@ -3102,7 +3148,7 @@ func TestTransitionCSV(t *testing.T) { // run csv sync for each CSV for _, csv := range tt.initial.csvs { err := op.syncClusterServiceVersion(csv) - expectedErr := tt.expected.err[csv.(*v1alpha1.ClusterServiceVersion).Name] + expectedErr := tt.expected.err[csv.Name] require.Equal(t, expectedErr, err) } @@ -3173,7 +3219,7 @@ func TestTransitionCSVFailForward(t *testing.T) { apiLabeler labeler.Labeler } type initial struct { - csvs []runtime.Object + csvs []*v1alpha1.ClusterServiceVersion clientObjs []runtime.Object crds []runtime.Object objs []runtime.Object @@ -3193,7 +3239,7 @@ func TestTransitionCSVFailForward(t *testing.T) { { name: "FailForwardEnabled/CSV1/FailedToReplacing", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "1.0.0", @@ -3231,7 +3277,7 @@ func TestTransitionCSVFailForward(t *testing.T) { { name: "FailForwardDisabled/CSV1/FailedToPending", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "1.0.0", @@ -3269,7 +3315,7 @@ func TestTransitionCSVFailForward(t *testing.T) { { name: "FailForwardEnabled/ReplacementChain/CSV2/FailedToReplacing", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "1.0.0", @@ -3317,7 +3363,7 @@ func TestTransitionCSVFailForward(t *testing.T) { { name: "FailForwardDisabled/ReplacementChain/CSV2/FailedToPending", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithAnnotations(csv("csv1", namespace, "1.0.0", @@ -3368,13 
+3414,22 @@ func TestTransitionCSVFailForward(t *testing.T) { // Create test operator ctx, cancel := context.WithCancel(context.TODO()) defer cancel() + clientObjects := tt.initial.clientObjs + var partials []runtime.Object + for _, csv := range tt.initial.csvs { + clientObjects = append(clientObjects, csv) + partials = append(partials, &metav1.PartialObjectMetadata{ + ObjectMeta: csv.ObjectMeta, + }) + } op, err := NewFakeOperator( ctx, withNamespaces(namespace, "kube-system"), - withClientObjs(append(tt.initial.csvs, tt.initial.clientObjs...)...), + withClientObjs(clientObjects...), withK8sObjs(tt.initial.objs...), withExtObjs(tt.initial.crds...), withRegObjs(tt.initial.apis...), + withPartialMetadata(partials...), withOperatorNamespace(namespace), withAPIReconciler(tt.config.apiReconciler), withAPILabeler(tt.config.apiLabeler), @@ -3384,7 +3439,7 @@ func TestTransitionCSVFailForward(t *testing.T) { // run csv sync for each CSV for _, csv := range tt.initial.csvs { err := op.syncClusterServiceVersion(csv) - expectedErr := tt.expected.err[csv.(*v1alpha1.ClusterServiceVersion).Name] + expectedErr := tt.expected.err[csv.Name] require.Equal(t, expectedErr, err) } @@ -3423,7 +3478,7 @@ func TestWebhookCABundleRetrieval(t *testing.T) { caBundle := []byte("Foo") type initial struct { - csvs []runtime.Object + csvs []*v1alpha1.ClusterServiceVersion crds []runtime.Object objs []runtime.Object desc v1alpha1.WebhookDescription @@ -3440,7 +3495,7 @@ func TestWebhookCABundleRetrieval(t *testing.T) { { name: "MissingCAResource", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csv("csv1", namespace, "0.0.0", @@ -3467,7 +3522,7 @@ func TestWebhookCABundleRetrieval(t *testing.T) { { name: "RetrieveCAFromConversionWebhook", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithConversionWebhook(csv("csv1", namespace, "0.0.0", @@ -3498,7 +3553,7 @@ func TestWebhookCABundleRetrieval(t *testing.T) { { 
name: "FailToRetrieveCAFromConversionWebhook", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithConversionWebhook(csv("csv1", namespace, "0.0.0", @@ -3529,7 +3584,7 @@ func TestWebhookCABundleRetrieval(t *testing.T) { { name: "RetrieveFromValidatingAdmissionWebhook", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithValidatingAdmissionWebhook(csv("csv1", namespace, "0.0.0", @@ -3578,7 +3633,7 @@ func TestWebhookCABundleRetrieval(t *testing.T) { { name: "RetrieveFromMutatingAdmissionWebhook", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ csvWithMutatingAdmissionWebhook(csv("csv1", namespace, "0.0.0", @@ -3630,19 +3685,28 @@ func TestWebhookCABundleRetrieval(t *testing.T) { // Create test operator ctx, cancel := context.WithCancel(context.TODO()) defer cancel() + var csvs []runtime.Object + var partials []runtime.Object + for _, csv := range tt.initial.csvs { + csvs = append(csvs, csv) + partials = append(partials, &metav1.PartialObjectMetadata{ + ObjectMeta: csv.ObjectMeta, + }) + } op, err := NewFakeOperator( ctx, withNamespaces(namespace, "kube-system"), - withClientObjs(tt.initial.csvs...), + withClientObjs(csvs...), withK8sObjs(tt.initial.objs...), withExtObjs(tt.initial.crds...), + withPartialMetadata(partials...), withOperatorNamespace(namespace), ) require.NoError(t, err) // run csv sync for each CSV for _, csv := range tt.initial.csvs { - caBundle, err := op.getWebhookCABundle(csv.(*v1alpha1.ClusterServiceVersion), &tt.initial.desc) + caBundle, err := op.getWebhookCABundle(csv, &tt.initial.desc) require.Equal(t, tt.expected.err, err) require.Equal(t, tt.expected.caBundle, caBundle) } @@ -4331,6 +4395,7 @@ func TestSyncOperatorGroups(t *testing.T) { type initial struct { operatorGroup *operatorsv1.OperatorGroup + csvs []*v1alpha1.ClusterServiceVersion clientObjs []runtime.Object crds []runtime.Object k8sObjs []runtime.Object @@ 
-4468,7 +4533,8 @@ func TestSyncOperatorGroups(t *testing.T) { }, }, }, - clientObjs: []runtime.Object{operatorCSV}, + clientObjs: []runtime.Object{}, + csvs: []*v1alpha1.ClusterServiceVersion{operatorCSV}, k8sObjs: []runtime.Object{ &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -4572,7 +4638,8 @@ func TestSyncOperatorGroups(t *testing.T) { TargetNamespaces: []string{operatorNamespace, targetNamespace}, }, }, - clientObjs: []runtime.Object{operatorCSV}, + clientObjs: []runtime.Object{}, + csvs: []*v1alpha1.ClusterServiceVersion{operatorCSV}, k8sObjs: []runtime.Object{ &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -4673,7 +4740,8 @@ func TestSyncOperatorGroups(t *testing.T) { }, Spec: operatorsv1.OperatorGroupSpec{}, }, - clientObjs: []runtime.Object{operatorCSV}, + clientObjs: []runtime.Object{}, + csvs: []*v1alpha1.ClusterServiceVersion{operatorCSV}, k8sObjs: []runtime.Object{ &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -4832,14 +4900,13 @@ func TestSyncOperatorGroups(t *testing.T) { }, Spec: operatorsv1.OperatorGroupSpec{}, }, - clientObjs: []runtime.Object{ - withInstallModes(operatorCSV.DeepCopy(), []v1alpha1.InstallMode{ - { - Type: v1alpha1.InstallModeTypeAllNamespaces, - Supported: false, - }, - }), - }, + clientObjs: []runtime.Object{}, + csvs: []*v1alpha1.ClusterServiceVersion{withInstallModes(operatorCSV.DeepCopy(), []v1alpha1.InstallMode{ + { + Type: v1alpha1.InstallModeTypeAllNamespaces, + Supported: false, + }, + })}, k8sObjs: []runtime.Object{ &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -4923,6 +4990,16 @@ func TestSyncOperatorGroups(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var partials []runtime.Object + for _, csv := range tt.initial.csvs { + clientObjs = append(clientObjs, csv) + partials = append(partials, &metav1.PartialObjectMetadata{ + ObjectMeta: csv.ObjectMeta, + }) + } + l := logrus.New() + l.SetLevel(logrus.DebugLevel) + l = l.WithField("test", tt.name).Logger 
op, err := NewFakeOperator( ctx, withClock(clockFake), @@ -4932,6 +5009,8 @@ func TestSyncOperatorGroups(t *testing.T) { withK8sObjs(k8sObjs...), withExtObjs(extObjs...), withRegObjs(regObjs...), + withPartialMetadata(partials...), + withLogger(l), ) require.NoError(t, err) @@ -4999,6 +5078,7 @@ func TestSyncOperatorGroups(t *testing.T) { }) require.NoError(t, err) + var foundErr error // Sync csvs enough to get them back to a succeeded state err = wait.PollUntilContextTimeout(ctx, tick, timeout, true, func(ctx context.Context) (bool, error) { csvs, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(operatorNamespace).List(ctx, metav1.ListOptions{}) @@ -5007,14 +5087,17 @@ func TestSyncOperatorGroups(t *testing.T) { } for _, csv := range csvs.Items { + t.Logf("%s/%s", csv.Namespace, csv.Name) if csv.Status.Phase == v1alpha1.CSVPhaseInstalling { simulateSuccessfulRollout(&csv) } + t.Log("op.syncClusterServiceVersion") if err := op.syncClusterServiceVersion(&csv); err != nil { return false, err } + t.Log("op.syncCopyCSV") if err := op.syncCopyCSV(&csv); err != nil && !tt.ignoreCopyError { return false, err } @@ -5022,12 +5105,14 @@ func TestSyncOperatorGroups(t *testing.T) { for namespace, objects := range tt.final.objects { if err := RequireObjectsInCache(t, op.lister, namespace, objects, true); err != nil { + foundErr = err return false, nil } } return true, nil }) + t.Log(foundErr) require.NoError(t, err) operatorGroup, err = op.client.OperatorsV1().OperatorGroups(operatorGroup.GetNamespace()).Get(ctx, operatorGroup.GetName(), metav1.GetOptions{}) @@ -5037,7 +5122,13 @@ func TestSyncOperatorGroups(t *testing.T) { assert.Equal(t, tt.expectedStatus, operatorGroup.Status) for namespace, objects := range tt.final.objects { - RequireObjectsInNamespace(t, op.opClient, op.client, namespace, objects) + var foundErr error + err = wait.PollUntilContextTimeout(ctx, tick, timeout, true, func(ctx context.Context) (bool, error) { + foundErr = CheckObjectsInNamespace(t, 
op.opClient, op.client, namespace, objects) + return foundErr == nil, nil + }) + t.Log(foundErr) + require.NoError(t, err) } }) } @@ -5262,7 +5353,7 @@ func RequireObjectsInCache(t *testing.T, lister operatorlister.OperatorLister, n } if doCompare { if !reflect.DeepEqual(object, fetched) { - return fmt.Errorf("expected object didn't match %v: %s", object, cmp.Diff(object, fetched)) + return fmt.Errorf("expected object didn't match: %s", cmp.Diff(object, fetched)) } } } @@ -5270,21 +5361,32 @@ func RequireObjectsInCache(t *testing.T, lister operatorlister.OperatorLister, n } func RequireObjectsInNamespace(t *testing.T, opClient operatorclient.ClientInterface, client versioned.Interface, namespace string, objects []runtime.Object) { + require.NoError(t, CheckObjectsInNamespace(t, opClient, client, namespace, objects)) +} + +func CheckObjectsInNamespace(t *testing.T, opClient operatorclient.ClientInterface, client versioned.Interface, namespace string, objects []runtime.Object) error { for _, object := range objects { var err error var fetched runtime.Object + var name string switch o := object.(type) { case *appsv1.Deployment: + name = o.GetName() fetched, err = opClient.GetDeployment(namespace, o.GetName()) case *rbacv1.ClusterRole: + name = o.GetName() fetched, err = opClient.GetClusterRole(o.GetName()) case *rbacv1.Role: + name = o.GetName() fetched, err = opClient.GetRole(namespace, o.GetName()) case *rbacv1.ClusterRoleBinding: + name = o.GetName() fetched, err = opClient.GetClusterRoleBinding(o.GetName()) case *rbacv1.RoleBinding: + name = o.GetName() fetched, err = opClient.GetRoleBinding(namespace, o.GetName()) case *v1alpha1.ClusterServiceVersion: + name = o.GetName() fetched, err = client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(context.TODO(), o.GetName(), metav1.GetOptions{}) // This protects against small timing issues in sync tests // We generally don't care about the conditions (state history in this case, unlike many kube resources) @@ 
-5292,15 +5394,22 @@ func RequireObjectsInNamespace(t *testing.T, opClient operatorclient.ClientInter object.(*v1alpha1.ClusterServiceVersion).Status.Conditions = nil fetched.(*v1alpha1.ClusterServiceVersion).Status.Conditions = nil case *operatorsv1.OperatorGroup: + name = o.GetName() fetched, err = client.OperatorsV1().OperatorGroups(namespace).Get(context.TODO(), o.GetName(), metav1.GetOptions{}) case *corev1.Secret: + name = o.GetName() fetched, err = opClient.GetSecret(namespace, o.GetName()) default: require.Failf(t, "couldn't find expected object", "%#v", object) } - require.NoError(t, err, "couldn't fetch %s %v", namespace, object) - require.True(t, reflect.DeepEqual(object, fetched), cmp.Diff(object, fetched)) + if err != nil { + return fmt.Errorf("couldn't fetch %s/%s: %w", namespace, name, err) + } + if diff := cmp.Diff(object, fetched); diff != "" { + return fmt.Errorf("incorrect object %s/%s: %v", namespace, name, diff) + } } + return nil } func TestCARotation(t *testing.T) { @@ -5350,7 +5459,7 @@ func TestCARotation(t *testing.T) { apiLabeler labeler.Labeler } type initial struct { - csvs []runtime.Object + csvs []*v1alpha1.ClusterServiceVersion clientObjs []runtime.Object crds []runtime.Object objs []runtime.Object @@ -5365,7 +5474,7 @@ func TestCARotation(t *testing.T) { // Happy path: cert is created and csv status contains the right cert dates name: "NoCertificate/CertificateCreated", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -5387,7 +5496,7 @@ func TestCARotation(t *testing.T) { // resources. If the certs exist and are valid, no need to rotate or update the csv status. 
name: "HasValidCertificate/ManagedPodDeleted/NoRotation", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withUID(withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -5396,7 +5505,7 @@ func TestCARotation(t *testing.T) { []*apiextensionsv1.CustomResourceDefinition{crd("c1", "v1", "g1")}, []*apiextensionsv1.CustomResourceDefinition{}, v1alpha1.CSVPhaseInstallReady, - ), defaultTemplateAnnotations), apis("a1.v1.a1Kind"), nil), rotateAt, lastUpdate), types.UID("csv-uid")), + ), defaultTemplateAnnotations), apis("a1.v1.a1Kind"), nil), rotateAt, lastUpdate), types.UID("csv-uid")).(*v1alpha1.ClusterServiceVersion), }, clientObjs: []runtime.Object{defaultOperatorGroup}, crds: []runtime.Object{ @@ -5450,7 +5559,7 @@ func TestCARotation(t *testing.T) { // If the cert secret is deleted, a new one is created name: "ValidCert/SecretMissing/NewCertCreated", initial: initial{ - csvs: []runtime.Object{ + csvs: []*v1alpha1.ClusterServiceVersion{ withUID(withCertInfo(withAPIServices(csvWithAnnotations(csv("csv1", namespace, "0.0.0", @@ -5459,7 +5568,7 @@ func TestCARotation(t *testing.T) { []*apiextensionsv1.CustomResourceDefinition{crd("c1", "v1", "g1")}, []*apiextensionsv1.CustomResourceDefinition{}, v1alpha1.CSVPhaseInstallReady, - ), defaultTemplateAnnotations), apis("a1.v1.a1Kind"), nil), rotateAt, lastUpdate), types.UID("csv-uid")), + ), defaultTemplateAnnotations), apis("a1.v1.a1Kind"), nil), rotateAt, lastUpdate), types.UID("csv-uid")).(*v1alpha1.ClusterServiceVersion), }, clientObjs: []runtime.Object{defaultOperatorGroup}, crds: []runtime.Object{ @@ -5514,13 +5623,22 @@ func TestCARotation(t *testing.T) { // Create test operator ctx, cancel := context.WithCancel(context.TODO()) defer cancel() + clientObjects := tt.initial.clientObjs + var partials []runtime.Object + for _, csv := range tt.initial.csvs { + clientObjects = append(clientObjects, csv) + partials = append(partials, 
&metav1.PartialObjectMetadata{ + ObjectMeta: csv.ObjectMeta, + }) + } op, err := NewFakeOperator( ctx, withNamespaces(namespace, "kube-system"), - withClientObjs(append(tt.initial.csvs, tt.initial.clientObjs...)...), + withClientObjs(clientObjects...), withK8sObjs(tt.initial.objs...), withExtObjs(tt.initial.crds...), withRegObjs(tt.initial.apis...), + withPartialMetadata(partials...), withOperatorNamespace(namespace), withAPIReconciler(tt.config.apiReconciler), withAPILabeler(tt.config.apiLabeler), @@ -5528,11 +5646,7 @@ func TestCARotation(t *testing.T) { require.NoError(t, err) // run csv sync for each CSV - for _, runtimeObject := range tt.initial.csvs { - // Convert the rt object to a proper csv for ease - csv, ok := runtimeObject.(*v1alpha1.ClusterServiceVersion) - require.True(t, ok) - + for _, csv := range tt.initial.csvs { // sync works err := op.syncClusterServiceVersion(csv) require.NoError(t, err) diff --git a/pkg/controller/operators/olm/operatorgroup.go b/pkg/controller/operators/olm/operatorgroup.go index cd135c8058..745048cbb0 100644 --- a/pkg/controller/operators/olm/operatorgroup.go +++ b/pkg/controller/operators/olm/operatorgroup.go @@ -12,7 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - meta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/errors" @@ -797,15 +797,15 @@ func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, ns prototype.ResourceVersion = "" prototype.UID = "" - existing, err := a.copiedCSVLister.ClusterServiceVersions(nsTo).Get(prototype.GetName()) + existing, err := a.copiedCSVLister.Namespace(nsTo).Get(prototype.GetName()) if apierrors.IsNotFound(err) { created, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Create(context.TODO(), prototype, metav1.CreateOptions{}) if err != nil { - 
return nil, err + return nil, fmt.Errorf("failed to create new CSV: %w", err) } created.Status = prototype.Status if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).UpdateStatus(context.TODO(), created, metav1.UpdateOptions{}); err != nil { - return nil, err + return nil, fmt.Errorf("failed to update status on new CSV: %w", err) } return &v1alpha1.ClusterServiceVersion{ ObjectMeta: metav1.ObjectMeta{ @@ -824,38 +824,39 @@ func (a *Operator) copyToNamespace(prototype *v1alpha1.ClusterServiceVersion, ns existingNonStatus := existing.Annotations["$copyhash-nonstatus"] existingStatus := existing.Annotations["$copyhash-status"] + var updated *v1alpha1.ClusterServiceVersion if existingNonStatus != nonstatus { - if existing, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), prototype, metav1.UpdateOptions{}); err != nil { - return nil, err + if updated, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).Update(context.TODO(), prototype, metav1.UpdateOptions{}); err != nil { + return nil, fmt.Errorf("failed to update: %w", err) } } else { // Avoid mutating cached copied CSV. 
- existing = prototype + updated = prototype } if existingStatus != status { - existing.Status = prototype.Status - if _, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).UpdateStatus(context.TODO(), existing, metav1.UpdateOptions{}); err != nil { - return nil, err + updated.Status = prototype.Status + if _, err = a.client.OperatorsV1alpha1().ClusterServiceVersions(nsTo).UpdateStatus(context.TODO(), updated, metav1.UpdateOptions{}); err != nil { + return nil, fmt.Errorf("failed to update status: %w", err) } } return &v1alpha1.ClusterServiceVersion{ ObjectMeta: metav1.ObjectMeta{ - Name: existing.Name, - Namespace: existing.Namespace, - UID: existing.UID, + Name: updated.Name, + Namespace: updated.Namespace, + UID: updated.UID, }, }, nil } func (a *Operator) pruneFromNamespace(operatorGroupName, namespace string) error { - fetchedCSVs, err := a.copiedCSVLister.ClusterServiceVersions(namespace).List(labels.Everything()) + fetchedCSVs, err := a.copiedCSVLister.Namespace(namespace).List(labels.Everything()) if err != nil { return err } for _, csv := range fetchedCSVs { - if csv.IsCopied() && csv.GetAnnotations()[operatorsv1.OperatorGroupAnnotationKey] == operatorGroupName { + if v1alpha1.IsCopied(csv) && csv.GetAnnotations()[operatorsv1.OperatorGroupAnnotationKey] == operatorGroupName { a.logger.Debugf("Found CSV '%v' in namespace %v to delete", csv.GetName(), namespace) if err := a.copiedCSVGCQueueSet.Requeue(csv.GetNamespace(), csv.GetName()); err != nil { return err diff --git a/pkg/controller/operators/olm/operatorgroup_test.go b/pkg/controller/operators/olm/operatorgroup_test.go index 8a745358f5..bb328c72cc 100644 --- a/pkg/controller/operators/olm/operatorgroup_test.go +++ b/pkg/controller/operators/olm/operatorgroup_test.go @@ -4,9 +4,11 @@ import ( "fmt" "testing" + "github.com/google/go-cmp/cmp" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + 
"k8s.io/client-go/metadata/metadatalister" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,8 +17,6 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" - listersv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1alpha1" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister/operatorlisterfakes" ) func TestCopyToNamespace(t *testing.T) { @@ -29,7 +29,7 @@ func TestCopyToNamespace(t *testing.T) { Hash string StatusHash string Prototype v1alpha1.ClusterServiceVersion - ExistingCopy *v1alpha1.ClusterServiceVersion + ExistingCopy *metav1.PartialObjectMetadata ExpectedResult *v1alpha1.ClusterServiceVersion ExpectedError error ExpectedActions []ktesting.Action @@ -105,7 +105,7 @@ func TestCopyToNamespace(t *testing.T) { Phase: "waxing gibbous", }, }, - ExistingCopy: &v1alpha1.ClusterServiceVersion{ + ExistingCopy: &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: "name", Namespace: "to", @@ -158,7 +158,7 @@ func TestCopyToNamespace(t *testing.T) { Phase: "waxing gibbous", }, }, - ExistingCopy: &v1alpha1.ClusterServiceVersion{ + ExistingCopy: &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: "name", Namespace: "to", @@ -211,7 +211,7 @@ func TestCopyToNamespace(t *testing.T) { Phase: "waxing gibbous", }, }, - ExistingCopy: &v1alpha1.ClusterServiceVersion{ + ExistingCopy: &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: "name", Namespace: "to", @@ -272,7 +272,7 @@ func TestCopyToNamespace(t *testing.T) { Name: "name", }, }, - ExistingCopy: &v1alpha1.ClusterServiceVersion{ + ExistingCopy: &metav1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: "name", Namespace: "to", @@ -293,21 +293,20 @@ func TestCopyToNamespace(t *testing.T) { }, } { t.Run(tc.Name, func(t *testing.T) { - 
lister := &operatorlisterfakes.FakeOperatorLister{} - v1alpha1lister := &operatorlisterfakes.FakeOperatorsV1alpha1Lister{} - lister.OperatorsV1alpha1Returns(v1alpha1lister) - client := fake.NewSimpleClientset() + var lister metadatalister.Lister if tc.ExistingCopy != nil { - client = fake.NewSimpleClientset(tc.ExistingCopy) - v1alpha1lister.ClusterServiceVersionListerReturns(FakeClusterServiceVersionLister{tc.ExistingCopy}) + client = fake.NewSimpleClientset(&v1alpha1.ClusterServiceVersion{ + ObjectMeta: tc.ExistingCopy.ObjectMeta, + }) + lister = FakeClusterServiceVersionLister{tc.ExistingCopy} } else { - v1alpha1lister.ClusterServiceVersionListerReturns(FakeClusterServiceVersionLister(nil)) + lister = FakeClusterServiceVersionLister{{}} } logger, _ := test.NewNullLogger() o := &Operator{ - copiedCSVLister: v1alpha1lister.ClusterServiceVersionLister(), + copiedCSVLister: lister, client: client, logger: logger, } @@ -319,21 +318,25 @@ func TestCopyToNamespace(t *testing.T) { } else { require.EqualError(t, err, tc.ExpectedError.Error()) } - assert.Equal(t, tc.ExpectedResult, result) + if diff := cmp.Diff(tc.ExpectedResult, result); diff != "" { + t.Errorf("incorrect result: %v", diff) + } actions := client.Actions() if len(actions) == 0 { actions = nil } - assert.Equal(t, tc.ExpectedActions, actions) + if diff := cmp.Diff(tc.ExpectedActions, actions); diff != "" { + t.Errorf("incorrect actions: %v", diff) + } }) } } -type FakeClusterServiceVersionLister []*v1alpha1.ClusterServiceVersion +type FakeClusterServiceVersionLister []*metav1.PartialObjectMetadata -func (l FakeClusterServiceVersionLister) List(selector labels.Selector) ([]*v1alpha1.ClusterServiceVersion, error) { - var result []*v1alpha1.ClusterServiceVersion +func (l FakeClusterServiceVersionLister) List(selector labels.Selector) ([]*metav1.PartialObjectMetadata, error) { + var result []*metav1.PartialObjectMetadata for _, csv := range l { if !selector.Matches(labels.Set(csv.GetLabels())) { continue @@ 
-343,8 +346,8 @@ func (l FakeClusterServiceVersionLister) List(selector labels.Selector) ([]*v1al return result, nil } -func (l FakeClusterServiceVersionLister) ClusterServiceVersions(namespace string) listersv1alpha1.ClusterServiceVersionNamespaceLister { - var filtered []*v1alpha1.ClusterServiceVersion +func (l FakeClusterServiceVersionLister) Namespace(namespace string) metadatalister.NamespaceLister { + var filtered []*metav1.PartialObjectMetadata for _, csv := range l { if csv.GetNamespace() != namespace { continue @@ -354,7 +357,7 @@ func (l FakeClusterServiceVersionLister) ClusterServiceVersions(namespace string return FakeClusterServiceVersionLister(filtered) } -func (l FakeClusterServiceVersionLister) Get(name string) (*v1alpha1.ClusterServiceVersion, error) { +func (l FakeClusterServiceVersionLister) Get(name string) (*metav1.PartialObjectMetadata, error) { for _, csv := range l { if csv.GetName() == name { return csv, nil @@ -364,8 +367,8 @@ func (l FakeClusterServiceVersionLister) Get(name string) (*v1alpha1.ClusterServ } var ( - _ listersv1alpha1.ClusterServiceVersionLister = FakeClusterServiceVersionLister{} - _ listersv1alpha1.ClusterServiceVersionNamespaceLister = FakeClusterServiceVersionLister{} + _ metadatalister.Lister = FakeClusterServiceVersionLister{} + _ metadatalister.NamespaceLister = FakeClusterServiceVersionLister{} ) func TestCSVCopyPrototype(t *testing.T) { diff --git a/vendor/k8s.io/client-go/metadata/fake/simple.go b/vendor/k8s.io/client-go/metadata/fake/simple.go new file mode 100644 index 0000000000..5b585f3fd6 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/fake/simple.go @@ -0,0 +1,405 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/metadata" + "k8s.io/client-go/testing" +) + +// MetadataClient assists in creating fake objects for use when testing, since metadata.Getter +// does not expose create +type MetadataClient interface { + metadata.Getter + CreateFake(obj *metav1.PartialObjectMetadata, opts metav1.CreateOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) + UpdateFake(obj *metav1.PartialObjectMetadata, opts metav1.UpdateOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) +} + +// NewTestScheme creates a unique Scheme for each test. +func NewTestScheme() *runtime.Scheme { + return runtime.NewScheme() +} + +// NewSimpleMetadataClient creates a new client that will use the provided scheme and respond with the +// provided objects when requests are made. It will track actions made to the client which can be checked +// with GetActions(). 
+func NewSimpleMetadataClient(scheme *runtime.Scheme, objects ...runtime.Object) *FakeMetadataClient { + gvkFakeList := schema.GroupVersionKind{Group: "fake-metadata-client-group", Version: "v1", Kind: "List"} + if !scheme.Recognizes(gvkFakeList) { + // In order to use List with this client, you have to have the v1.List registered in your scheme, since this is a test + // type we modify the input scheme + scheme.AddKnownTypeWithName(gvkFakeList, &metav1.List{}) + } + + codecs := serializer.NewCodecFactory(scheme) + o := testing.NewObjectTracker(scheme, codecs.UniversalDeserializer()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &FakeMetadataClient{scheme: scheme, tracker: o} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// FakeMetadataClient implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type FakeMetadataClient struct { + testing.Fake + scheme *runtime.Scheme + tracker testing.ObjectTracker +} + +type metadataResourceClient struct { + client *FakeMetadataClient + namespace string + resource schema.GroupVersionResource +} + +var ( + _ metadata.Interface = &FakeMetadataClient{} + _ testing.FakeClient = &FakeMetadataClient{} +) + +func (c *FakeMetadataClient) Tracker() testing.ObjectTracker { + return c.tracker +} + +// Resource returns an interface for accessing the provided resource. 
+func (c *FakeMetadataClient) Resource(resource schema.GroupVersionResource) metadata.Getter { + return &metadataResourceClient{client: c, resource: resource} +} + +// Namespace returns an interface for accessing the current resource in the specified +// namespace. +func (c *metadataResourceClient) Namespace(ns string) metadata.ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +// CreateFake records the object creation and processes it via the reactor. +func (c *metadataResourceClient) CreateFake(obj *metav1.PartialObjectMetadata, opts metav1.CreateOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootCreateAction(c.resource, obj), obj) + + case len(c.namespace) == 0 && len(subresources) > 0: + var accessor metav1.Object // avoid shadowing err + accessor, err = meta.Accessor(obj) + if err != nil { + return nil, err + } + name := accessor.GetName() + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), obj), obj) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewCreateAction(c.resource, c.namespace, obj), obj) + + case len(c.namespace) > 0 && len(subresources) > 0: + var accessor metav1.Object // avoid shadowing err + accessor, err = meta.Accessor(obj) + if err != nil { + return nil, err + } + name := accessor.GetName() + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + ret, ok := uncastRet.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected return value type %T", uncastRet) + } + return ret, err +} + +// UpdateFake records the object update and processes it via the reactor. +func (c *metadataResourceClient) UpdateFake(obj *metav1.PartialObjectMetadata, opts metav1.UpdateOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateAction(c.resource, obj), obj) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), obj), obj) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateAction(c.resource, c.namespace, obj), obj) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + ret, ok := uncastRet.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected return value type %T", uncastRet) + } + return ret, err +} + +// UpdateStatus records the object status update and processes it via the reactor. 
+func (c *metadataResourceClient) UpdateStatus(obj *metav1.PartialObjectMetadata, opts metav1.UpdateOptions) (*metav1.PartialObjectMetadata, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(c.resource, "status", obj), obj) + + case len(c.namespace) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewUpdateSubresourceAction(c.resource, "status", c.namespace, obj), obj) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + ret, ok := uncastRet.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected return value type %T", uncastRet) + } + return ret, err +} + +// Delete records the object deletion and processes it via the reactor. +func (c *metadataResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + _, err = c.client.Fake. + Invokes(testing.NewRootDeleteAction(c.resource, name), &metav1.Status{Status: "metadata delete fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + _, err = c.client.Fake. + Invokes(testing.NewRootDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "metadata delete fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + _, err = c.client.Fake. + Invokes(testing.NewDeleteAction(c.resource, c.namespace, name), &metav1.Status{Status: "metadata delete fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + _, err = c.client.Fake. + Invokes(testing.NewDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, name), &metav1.Status{Status: "metadata delete fail"}) + } + + return err +} + +// DeleteCollection records the object collection deletion and processes it via the reactor. 
+func (c *metadataResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var err error + switch { + case len(c.namespace) == 0: + action := testing.NewRootDeleteCollectionAction(c.resource, listOptions) + _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "metadata deletecollection fail"}) + + case len(c.namespace) > 0: + action := testing.NewDeleteCollectionAction(c.resource, c.namespace, listOptions) + _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "metadata deletecollection fail"}) + + } + + return err +} + +// Get records the object retrieval and processes it via the reactor. +func (c *metadataResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootGetAction(c.resource, name), &metav1.Status{Status: "metadata get fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootGetSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "metadata get fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewGetAction(c.resource, c.namespace, name), &metav1.Status{Status: "metadata get fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewGetSubresourceAction(c.resource, c.namespace, strings.Join(subresources, "/"), name), &metav1.Status{Status: "metadata get fail"}) + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + ret, ok := uncastRet.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected return value type %T", uncastRet) + } + return ret, err +} + +// List records the object deletion and processes it via the reactor. +func (c *metadataResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + var obj runtime.Object + var err error + switch { + case len(c.namespace) == 0: + obj, err = c.client.Fake. + Invokes(testing.NewRootListAction(c.resource, schema.GroupVersionKind{Group: "fake-metadata-client-group", Version: "v1", Kind: "" /*List is appended by the tracker automatically*/}, opts), &metav1.Status{Status: "metadata list fail"}) + + case len(c.namespace) > 0: + obj, err = c.client.Fake. 
+ Invokes(testing.NewListAction(c.resource, schema.GroupVersionKind{Group: "fake-metadata-client-group", Version: "v1", Kind: "" /*List is appended by the tracker automatically*/}, c.namespace, opts), &metav1.Status{Status: "metadata list fail"}) + + } + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + + inputList, ok := obj.(*metav1.List) + if !ok { + return nil, fmt.Errorf("incoming object is incorrect type %T", obj) + } + + list := &metav1.PartialObjectMetadataList{ + ListMeta: inputList.ListMeta, + } + for i := range inputList.Items { + item, ok := inputList.Items[i].Object.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("item %d in list %T is %T", i, inputList, inputList.Items[i].Object) + } + metadata, err := meta.Accessor(item) + if err != nil { + return nil, err + } + if label.Matches(labels.Set(metadata.GetLabels())) { + list.Items = append(list.Items, *item) + } + } + return list, nil +} + +func (c *metadataResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + switch { + case len(c.namespace) == 0: + return c.client.Fake. + InvokesWatch(testing.NewRootWatchAction(c.resource, opts)) + + case len(c.namespace) > 0: + return c.client.Fake. + InvokesWatch(testing.NewWatchAction(c.resource, c.namespace, opts)) + + } + + panic("math broke") +} + +// Patch records the object patch and processes it via the reactor. +func (c *metadataResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + var uncastRet runtime.Object + var err error + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewRootPatchAction(c.resource, name, pt, data), &metav1.Status{Status: "metadata patch fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, pt, data, subresources...), &metav1.Status{Status: "metadata patch fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewPatchAction(c.resource, c.namespace, name, pt, data), &metav1.Status{Status: "metadata patch fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, pt, data, subresources...), &metav1.Status{Status: "metadata patch fail"}) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + ret, ok := uncastRet.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected return value type %T", uncastRet) + } + return ret, err +} diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go new file mode 100644 index 0000000000..c211a4b729 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go @@ -0,0 +1,183 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metadatainformer + +import ( + "context" + "sync" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/informers" + "k8s.io/client-go/metadata" + "k8s.io/client-go/metadata/metadatalister" + "k8s.io/client-go/tools/cache" +) + +// NewSharedInformerFactory constructs a new instance of metadataSharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client metadata.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, metav1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of metadataSharedInformerFactory. +// Listers obtained via this factory will be subject to the same filters as specified here. +func NewFilteredSharedInformerFactory(client metadata.Interface, defaultResync time.Duration, namespace string, tweakListOptions TweakListOptionsFunc) SharedInformerFactory { + return &metadataSharedInformerFactory{ + client: client, + defaultResync: defaultResync, + namespace: namespace, + informers: map[schema.GroupVersionResource]informers.GenericInformer{}, + startedInformers: make(map[schema.GroupVersionResource]bool), + tweakListOptions: tweakListOptions, + } +} + +type metadataSharedInformerFactory struct { + client metadata.Interface + defaultResync time.Duration + namespace string + + lock sync.Mutex + informers map[schema.GroupVersionResource]informers.GenericInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[schema.GroupVersionResource]bool + tweakListOptions TweakListOptionsFunc + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. 
It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +var _ SharedInformerFactory = &metadataSharedInformerFactory{} + +func (f *metadataSharedInformerFactory) ForResource(gvr schema.GroupVersionResource) informers.GenericInformer { + f.lock.Lock() + defer f.lock.Unlock() + + key := gvr + informer, exists := f.informers[key] + if exists { + return informer + } + + informer = NewFilteredMetadataInformer(f.client, gvr, f.namespace, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) + f.informers[key] = informer + + return informer +} + +// Start initializes all requested informers. +func (f *metadataSharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer.Informer() + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *metadataSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool { + informers := func() map[schema.GroupVersionResource]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[schema.GroupVersionResource]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer.Informer() + } + } + return informers + }() + + res := map[schema.GroupVersionResource]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +func (f *metadataSharedInformerFactory) Shutdown() { + // Will return immediately if there is nothing to wait for. + defer f.wg.Wait() + + f.lock.Lock() + defer f.lock.Unlock() + f.shuttingDown = true +} + +// NewFilteredMetadataInformer constructs a new informer for a metadata type. +func NewFilteredMetadataInformer(client metadata.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer { + return &metadataInformer{ + gvr: gvr, + informer: cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options) + }, + }, + &metav1.PartialObjectMetadata{}, + resyncPeriod, + indexers, + ), + } +} + +type metadataInformer struct { + informer cache.SharedIndexInformer + gvr schema.GroupVersionResource +} + +var _ informers.GenericInformer = &metadataInformer{} + 
+func (d *metadataInformer) Informer() cache.SharedIndexInformer { + return d.informer +} + +func (d *metadataInformer) Lister() cache.GenericLister { + return metadatalister.NewRuntimeObjectShim(metadatalister.New(d.informer.GetIndexer(), d.gvr)) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go new file mode 100644 index 0000000000..9f61706cda --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatainformer + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" +) + +// SharedInformerFactory provides access to a shared informer and lister for dynamic client +type SharedInformerFactory interface { + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(gvr schema.GroupVersionResource) informers.GenericInformer + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool + + // Shutdown marks a factory as shutting down. 
At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() +} + +// TweakListOptionsFunc defines the signature of a helper function +// that wants to provide more listing options to API +type TweakListOptionsFunc func(*metav1.ListOptions) diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/interface.go b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go new file mode 100644 index 0000000000..bb35485895 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// Lister helps list resources. +type Lister interface { + // List lists all resources in the indexer. 
+ List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer with the given name + Get(name string) (*metav1.PartialObjectMetadata, error) + // Namespace returns an object that can list and get resources in a given namespace. + Namespace(namespace string) NamespaceLister +} + +// NamespaceLister helps list and get resources. +type NamespaceLister interface { + // List lists all resources in the indexer for a given namespace. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer for a given namespace and name. + Get(name string) (*metav1.PartialObjectMetadata, error) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/lister.go b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go new file mode 100644 index 0000000000..faeccc0fc2 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +var _ Lister = &metadataLister{} +var _ NamespaceLister = &metadataNamespaceLister{} + +// metadataLister implements the Lister interface. 
+type metadataLister struct { + indexer cache.Indexer + gvr schema.GroupVersionResource +} + +// New returns a new Lister. +func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { + return &metadataLister{indexer: indexer, gvr: gvr} +} + +// List lists all resources in the indexer. +func (l *metadataLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAll(l.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer with the given name +func (l *metadataLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} + +// Namespace returns an object that can list and get resources from a given namespace. +func (l *metadataLister) Namespace(namespace string) NamespaceLister { + return &metadataNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} +} + +// metadataNamespaceLister implements the NamespaceLister interface. +type metadataNamespaceLister struct { + indexer cache.Indexer + namespace string + gvr schema.GroupVersionResource +} + +// List lists all resources in the indexer for a given namespace. +func (l *metadataNamespaceLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer for a given namespace and name. 
+func (l *metadataNamespaceLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/shim.go b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go new file mode 100644 index 0000000000..f31c607258 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +var _ cache.GenericLister = &metadataListerShim{} +var _ cache.GenericNamespaceLister = &metadataNamespaceListerShim{} + +// metadataListerShim implements the cache.GenericLister interface. +type metadataListerShim struct { + lister Lister +} + +// NewRuntimeObjectShim returns a new shim for Lister. 
+// It wraps Lister so that it implements cache.GenericLister interface +func NewRuntimeObjectShim(lister Lister) cache.GenericLister { + return &metadataListerShim{lister: lister} +} + +// List will return all objects across namespaces +func (s *metadataListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := s.lister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve assuming that name==key +func (s *metadataListerShim) Get(name string) (runtime.Object, error) { + return s.lister.Get(name) +} + +func (s *metadataListerShim) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &metadataNamespaceListerShim{ + namespaceLister: s.lister.Namespace(namespace), + } +} + +// metadataNamespaceListerShim implements the NamespaceLister interface. +// It wraps NamespaceLister so that it implements cache.GenericNamespaceLister interface +type metadataNamespaceListerShim struct { + namespaceLister NamespaceLister +} + +// List will return all objects in this namespace +func (ns *metadataNamespaceListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := ns.namespaceLister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve by namespace and name +func (ns *metadataNamespaceListerShim) Get(name string) (runtime.Object, error) { + return ns.namespaceLister.Get(name) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 61f15e4c9d..2239666b98 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1760,6 +1760,9 @@ k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 k8s.io/client-go/metadata 
+k8s.io/client-go/metadata/fake +k8s.io/client-go/metadata/metadatainformer +k8s.io/client-go/metadata/metadatalister k8s.io/client-go/openapi k8s.io/client-go/openapi/cached k8s.io/client-go/openapi3