
Commit 85a6f24: fixups
Parent: 508d9ea
11 files changed: +76 −20

controllers/clustercache/cluster_accessor.go (+1 −1)

@@ -66,7 +66,7 @@ type clusterAccessorConfig struct {
 	// with the following name format: "<cluster-name>-kubeconfig".
 	// Ideally this is a client that caches only kubeconfig secrets, it is highly recommended to avoid caching all secrets.
 	// An example on how to create an ideal secret caching client can be found in the core Cluster API controller main.go file.
-	SecretClient client.Client
+	SecretClient client.Reader
 
 	// ControllerPodMetadata is the Pod metadata of the controller using this ClusterCache.
 	// This is only set when the POD_NAMESPACE, POD_NAME and POD_UID environment variables are set.

controllers/clustercache/cluster_cache.go (+2 −2)

@@ -52,7 +52,7 @@ type Options struct {
 	// with the following name format: "<cluster-name>-kubeconfig".
 	// Ideally this is a client that caches only kubeconfig secrets, it is highly recommended to avoid caching all secrets.
 	// An example on how to create an ideal secret caching client can be found in the core Cluster API controller main.go file.
-	SecretClient client.Client
+	SecretClient client.Reader
 
 	// WatchFilterValue is the label value used to filter events prior to reconciliation.
 	WatchFilterValue string
@@ -276,7 +276,7 @@ func SetupWithManager(ctx context.Context, mgr manager.Manager, options Options,
 }
 
 type clusterCache struct {
-	client client.Client
+	client client.Reader
 
 	// clusterAccessorConfig is the config for clusterAccessors.
 	clusterAccessorConfig *clusterAccessorConfig
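Both SecretClient fields are narrowed from client.Client to client.Reader: the ClusterCache only ever reads kubeconfig Secrets, so requiring write access was stronger than needed, and since client.Client embeds client.Reader every existing caller keeps compiling. The doc comment points at the core Cluster API controller's main.go for an ideal secret caching client; below is a minimal sketch of that idea, assuming controller-runtime v0.15+ APIs (the function names and the label selector are illustrative, not taken from this commit):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// newSecretOnlyCache builds a cache whose Secret informer is restricted by a
// label selector (illustrative; use one matching your kubeconfig Secrets).
// Informers are created lazily, so as long as only Secrets are read through
// this cache, nothing else gets cached.
func newSecretOnlyCache(mgr manager.Manager, sel labels.Selector) (cache.Cache, error) {
	return cache.New(mgr.GetConfig(), cache.Options{
		HTTPClient: mgr.GetHTTPClient(),
		Scheme:     mgr.GetScheme(),
		Mapper:     mgr.GetRESTMapper(),
		ByObject: map[client.Object]cache.ByObject{
			&corev1.Secret{}: {Label: sel},
		},
	})
}

// newSecretCachingClient returns a client that serves reads from the
// Secret-only cache above; it satisfies client.Reader and can therefore be
// plugged into the SecretClient fields changed in this commit.
func newSecretCachingClient(mgr manager.Manager, secretCache cache.Cache) (client.Client, error) {
	return client.New(mgr.GetConfig(), client.Options{
		HTTPClient: mgr.GetHTTPClient(),
		Cache: &client.CacheOptions{
			Reader: secretCache, // cached reads hit the Secret-only cache
		},
	})
}
```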

internal/controllers/cluster/suite_test.go (+3 −2)

@@ -76,8 +76,9 @@ func TestMain(m *testing.M) {
 	}
 
 	if err := (&Reconciler{
-		Client:    mgr.GetClient(),
-		APIReader: mgr.GetClient(),
+		Client:       mgr.GetClient(),
+		APIReader:    mgr.GetClient(),
+		ClusterCache: clusterCache,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err))
 	}
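For context, the clusterCache value wired into the Reconciler above comes from clustercache.SetupWithManager earlier in TestMain. A sketch based on the options visible elsewhere in this commit (concrete values are illustrative):

```go
// Create the ClusterCache before wiring it into reconcilers.
// After this commit, Options.SecretClient only needs a client.Reader.
clusterCache, err := clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
	SecretClient: mgr.GetClient(),
	Client: clustercache.ClientOptions{
		UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"),
	},
}, controller.Options{MaxConcurrentReconciles: 10})
if err != nil {
	panic(fmt.Sprintf("Failed to create ClusterCache: %v", err))
}
```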

internal/controllers/machine/machine_controller_noderef_test.go (+23 −11)

@@ -58,6 +58,11 @@ func TestGetNode(t *testing.T) {
 	}
 
 	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patch := client.MergeFrom(testCluster.DeepCopy())
+	testCluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())
+
 	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
 	defer func(do ...client.Object) {
 		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
@@ -137,7 +142,7 @@ func TestGetNode(t *testing.T) {
 		Client: clustercache.ClientOptions{
 			UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"),
 		},
-	}, controller.Options{MaxConcurrentReconciles: 10})
+	}, controller.Options{MaxConcurrentReconciles: 10, SkipNameValidation: ptr.To(true)})
 	if err != nil {
 		panic(fmt.Sprintf("Failed to create new cluster cache tracker: %v", err))
 	}
@@ -150,15 +155,18 @@ func TestGetNode(t *testing.T) {
 	w, err := ctrl.NewControllerManagedBy(env.Manager).For(&corev1.Node{}).Build(r)
 	g.Expect(err).ToNot(HaveOccurred())
 
-	g.Expect(clusterCache.Watch(ctx, clustercache.WatchInput{
-		Name:    "TestGetNode",
-		Cluster: util.ObjectKey(testCluster),
-		Watcher: w,
-		Kind:    &corev1.Node{},
-		EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request {
-			return nil
-		}),
-	})).To(Succeed())
+	// Retry because the ClusterCache might not have immediately created the clusterAccessor.
+	g.Eventually(func(g Gomega) {
+		g.Expect(clusterCache.Watch(ctx, clustercache.WatchInput{
+			Name:    "TestGetNode",
+			Cluster: util.ObjectKey(testCluster),
+			Watcher: w,
+			Kind:    &corev1.Node{},
+			EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request {
+				return nil
+			}),
+		})).To(Succeed())
+	}, 1*time.Minute, 5*time.Second).Should(Succeed())
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
@@ -339,7 +347,11 @@ func TestNodeLabelSync(t *testing.T) {
 
 	g.Expect(env.Create(ctx, cluster)).To(Succeed())
 	defaultKubeconfigSecret := kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
-	g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+	g.Expect(env.CreateAndWait(ctx, defaultKubeconfigSecret)).To(Succeed())
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patch := client.MergeFrom(cluster.DeepCopy())
+	cluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
 
 	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
 	// Set InfrastructureMachine .status.interruptible and .status.ready to true.
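The recurring status patch in these tests follows controller-runtime's snapshot-then-patch idiom: client.MergeFrom records the object's state before mutation, and Status().Patch sends only the resulting diff to the status subresource (which envtest enforces, so a plain Create or Update cannot set status). In isolation:

```go
// Snapshot-then-patch idiom used throughout this commit.
patch := client.MergeFrom(testCluster.DeepCopy()) // 1. snapshot the current state
testCluster.Status.InfrastructureReady = true     // 2. mutate the copy in memory
// 3. send a merge patch containing only the status delta
g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())
```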

internal/controllers/machine/machine_controller_phases_test.go (+12)

@@ -278,6 +278,10 @@ func TestReconcileMachinePhases(t *testing.T) {
 	g.Expect(env.Create(ctx, cluster)).To(Succeed())
 	defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
 	g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patch := client.MergeFrom(cluster.DeepCopy())
+	cluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
 
 	g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
 	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
@@ -363,6 +367,10 @@ func TestReconcileMachinePhases(t *testing.T) {
 	g.Expect(env.Create(ctx, cluster)).To(Succeed())
 	defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
 	g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patch := client.MergeFrom(cluster.DeepCopy())
+	cluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
 
 	g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
 	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
@@ -437,6 +445,10 @@ func TestReconcileMachinePhases(t *testing.T) {
 	g.Expect(env.Create(ctx, cluster)).To(Succeed())
 	defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
 	g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patch := client.MergeFrom(cluster.DeepCopy())
+	cluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
 
 	g.Expect(env.Create(ctx, bootstrapConfig)).To(Succeed())
 	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
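The same four-line patch block now appears in several test files; if it keeps spreading, a small helper could consolidate it. Hypothetical, not part of this commit:

```go
// markInfrastructureReady patches a Cluster's status so the ClusterCache
// will create a clusterAccessor for it. Hypothetical helper; g is the
// Gomega instance the calling test already created with NewWithT(t).
func markInfrastructureReady(ctx context.Context, g *WithT, c client.Client, cluster *clusterv1.Cluster) {
	patch := client.MergeFrom(cluster.DeepCopy())
	cluster.Status.InfrastructureReady = true
	g.Expect(c.Status().Patch(ctx, cluster, patch)).To(Succeed())
}
```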

internal/controllers/machine/machine_controller_test.go (+5)

@@ -108,6 +108,11 @@ func TestWatches(t *testing.T) {
 
 	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
 	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	testClusterOriginal := client.MergeFrom(testCluster.DeepCopy())
+	testCluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, testCluster, testClusterOriginal)).To(Succeed())
+
 	g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed())
 	g.Expect(env.Create(ctx, node)).To(Succeed())
 	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())

internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go (+6)

@@ -2377,6 +2377,12 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster {
 
 	g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
 
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patchHelper, err = patch.NewHelper(cluster, env.Client)
+	g.Expect(err).ToNot(HaveOccurred())
+	cluster.Status.InfrastructureReady = true
+	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
+
 	return cluster
 }
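This file uses Cluster API's patch helper instead of a raw MergeFrom: patch.NewHelper snapshots the object when it is constructed, and Helper.Patch later computes the diff against that snapshot and applies spec and status changes through the appropriate endpoints. The flow above, reduced to its shape (sketch):

```go
// Snapshot, mutate, then let the helper compute and apply the diff.
patchHelper, err := patch.NewHelper(cluster, env.Client) // snapshot now
g.Expect(err).ToNot(HaveOccurred())
cluster.Status.InfrastructureReady = true                // mutate
g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())  // diff + patch
```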

internal/controllers/machinehealthcheck/suite_test.go (+3 −2)

@@ -79,8 +79,9 @@ func TestMain(m *testing.M) {
 	}
 
 	if err := (&clustercontroller.Reconciler{
-		Client:    mgr.GetClient(),
-		APIReader: mgr.GetClient(),
+		Client:       mgr.GetClient(),
+		APIReader:    mgr.GetClient(),
+		ClusterCache: clusterCache,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 		panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err))
 	}

internal/controllers/machineset/machineset_controller_test.go (+5)

@@ -61,6 +61,11 @@ func TestMachineSetReconciler(t *testing.T) {
 	t.Log("Creating the Cluster Kubeconfig Secret")
 	g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
 
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	patch := client.MergeFrom(cluster.DeepCopy())
+	cluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
+
 	return ns, cluster
 }

internal/controllers/topology/cluster/cluster_controller_test.go (+13 −1)

@@ -894,6 +894,18 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error)
 			return cleanup, err
 		}
 	}
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessors.
+	patch := client.MergeFrom(cluster1.DeepCopy())
+	cluster1.Status.InfrastructureReady = true
+	if err := env.Status().Patch(ctx, cluster1, patch); err != nil {
+		return nil, err
+	}
+	patch = client.MergeFrom(cluster2.DeepCopy())
+	cluster2.Status.InfrastructureReady = true
+	if err := env.Status().Patch(ctx, cluster2, patch); err != nil {
+		return nil, err
+	}
+
 	return cleanup, nil
 }
 
@@ -1041,7 +1053,7 @@ func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error {
 
 	// Check replicas and version for the MachineDeployment.
 	if *md.Spec.Replicas != *topologyMD.Replicas {
-		return fmt.Errorf("replicas %v does not match expected %v", md.Spec.Replicas, topologyMD.Replicas)
+		return fmt.Errorf("replicas %v does not match expected %v", *md.Spec.Replicas, *topologyMD.Replicas)
 	}
 	if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version {
 		return fmt.Errorf("version %v does not match expected %v", *md.Spec.Template.Spec.Version, cluster.Spec.Topology.Version)

internal/test/envtest/environment.go (+3 −1)

@@ -77,7 +77,9 @@ func init() {
 	// Otherwise it would fall back and log to os.Stderr.
 	// This would lead to race conditions because input.M.Run() writes os.Stderr
 	// while some go routines in controller-runtime use os.Stderr to write logs.
-	if err := logsv1.ValidateAndApply(logs.NewOptions(), nil); err != nil {
+	logOptions := logs.NewOptions()
+	logOptions.Verbosity = logsv1.VerbosityLevel(6) // FIXME: change to 2 before merge
+	if err := logsv1.ValidateAndApply(logOptions, nil); err != nil {
 		klog.ErrorS(err, "Unable to validate and apply log options")
 		os.Exit(1)
 	}
