Skip to content

Commit 7464233

Browse files
🌱 Use kind as a secondary management cluster for clusterctl E2E tests (#10639)
* Use kind as a secondary management cluster for clusterctl E2E tests * Use kind in all clusterctl tests * Fix lint * Address comments
1 parent 3342e83 commit 7464233

File tree

4 files changed

+151
-84
lines changed

4 files changed

+151
-84
lines changed

‎test/e2e/clusterctl_upgrade.go

+103-54
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,12 @@ type ClusterctlUpgradeSpecInput struct {
6161
ClusterctlConfigPath string
6262
BootstrapClusterProxy framework.ClusterProxy
6363
ArtifactFolder string
64+
65+
// UseKindForManagementCluster instructs the test to use kind for creating the management cluster (instead of using the actual infrastructure provider).
66+
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
67+
// So we are creating a new management cluster where to install older versions of providers.
68+
UseKindForManagementCluster bool
69+
6470
// InitWithBinary must be used to specify the URL of the clusterctl binary of the old version of Cluster API. The spec will interpolate the
6571
// strings `{OS}` and `{ARCH}` to `runtime.GOOS` and `runtime.GOARCH` respectively, e.g. https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-{OS}-{ARCH}
6672
InitWithBinary string
@@ -187,16 +193,22 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
187193
managementClusterNamespace *corev1.Namespace
188194
managementClusterCancelWatches context.CancelFunc
189195
managementClusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
196+
managementClusterProvider bootstrap.ClusterProvider
190197
managementClusterProxy framework.ClusterProxy
191198

192199
initClusterctlBinaryURL string
193200
initContract string
194201
initKubernetesVersion string
195202

196203
workloadClusterName string
204+
205+
scheme *apiruntime.Scheme
197206
)
198207

199208
BeforeEach(func() {
209+
scheme = apiruntime.NewScheme()
210+
framework.TryAddDefaultSchemes(scheme)
211+
200212
Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
201213
input = inputGetter()
202214
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
@@ -231,73 +243,104 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
231243
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
232244
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
233245

234-
// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
235-
managementClusterNamespace, managementClusterCancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
246+
// If the test is not being run in a separated kind cluster, setup a Namespace in the current bootstrap cluster where to host objects for this spec and create a watcher for the namespace events.
247+
if !input.UseKindForManagementCluster {
248+
managementClusterNamespace, managementClusterCancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
249+
}
236250
managementClusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
237251
})
238252

239253
It("Should create a management cluster and then upgrade all the providers", func() {
240-
By("Creating a workload cluster to be used as a new management cluster")
241-
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
242-
// So we are creating a workload cluster that will be used as a new management cluster where to install older version of providers
243254
infrastructureProvider := clusterctl.DefaultInfrastructureProvider
244255
if input.InfrastructureProvider != nil {
245256
infrastructureProvider = *input.InfrastructureProvider
246257
}
247-
managementClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
248-
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
249-
ClusterProxy: input.BootstrapClusterProxy,
250-
ConfigCluster: clusterctl.ConfigClusterInput{
251-
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
252-
ClusterctlConfigPath: input.ClusterctlConfigPath,
253-
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
254-
InfrastructureProvider: infrastructureProvider,
255-
Flavor: input.MgmtFlavor,
256-
Namespace: managementClusterNamespace.Name,
257-
ClusterName: managementClusterName,
258-
KubernetesVersion: initKubernetesVersion,
259-
ControlPlaneMachineCount: ptr.To[int64](1),
260-
WorkerMachineCount: ptr.To[int64](1),
261-
},
262-
PreWaitForCluster: func() {
263-
if input.PreWaitForCluster != nil {
264-
input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
265-
}
266-
},
267-
CNIManifestPath: input.CNIManifestPath,
268-
ControlPlaneWaiters: input.ControlPlaneWaiters,
269-
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
270-
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
271-
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
272-
}, managementClusterResources)
273-
274-
By("Turning the workload cluster into a management cluster with older versions of providers")
275-
276-
// If the cluster is a DockerCluster, we should load controller images into the nodes.
277-
// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
278-
// this approach because this allows to have a single source of truth for images, the e2e config
279-
// Nb. the images for official version of the providers will be pulled from internet, but the latest images must be
280-
// built locally and loaded into kind
281-
cluster := managementClusterResources.Cluster
282-
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
283-
Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
284-
Name: cluster.Name,
285-
Images: input.E2EConfig.Images,
286-
})).To(Succeed())
258+
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
259+
// So we are creating a workload cluster that will be used as a new management cluster where to install older versions of providers.
260+
managementClusterName = fmt.Sprintf("%s-management-%s", specName, util.RandomString(6))
261+
managementClusterLogFolder := filepath.Join(input.ArtifactFolder, "clusters", managementClusterName)
262+
if input.UseKindForManagementCluster {
263+
By("Creating a kind cluster to be used as a new management cluster")
264+
265+
managementClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
266+
Name: managementClusterName,
267+
KubernetesVersion: initKubernetesVersion,
268+
RequiresDockerSock: input.E2EConfig.HasDockerProvider(),
269+
// Note: most of these images won't be used while starting the controllers, because the cluster is used to spin up older versions of CAPI. Those images will be eventually used when upgrading to current.
270+
Images: input.E2EConfig.Images,
271+
IPFamily: input.E2EConfig.GetVariable(IPFamily),
272+
LogFolder: filepath.Join(managementClusterLogFolder, "logs-kind"),
273+
})
274+
Expect(managementClusterProvider).ToNot(BeNil(), "Failed to create a kind cluster")
275+
276+
kubeconfigPath := managementClusterProvider.GetKubeconfigPath()
277+
Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the kind cluster")
278+
279+
managementClusterProxy = framework.NewClusterProxy(managementClusterName, kubeconfigPath, scheme)
280+
Expect(managementClusterProxy).ToNot(BeNil(), "Failed to get a kind cluster proxy")
281+
282+
managementClusterResources.Cluster = &clusterv1.Cluster{
283+
ObjectMeta: metav1.ObjectMeta{
284+
Name: managementClusterName,
285+
},
286+
}
287+
} else {
288+
By("Creating a workload cluster to be used as a new management cluster")
289+
290+
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
291+
ClusterProxy: input.BootstrapClusterProxy,
292+
ConfigCluster: clusterctl.ConfigClusterInput{
293+
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
294+
ClusterctlConfigPath: input.ClusterctlConfigPath,
295+
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
296+
InfrastructureProvider: infrastructureProvider,
297+
Flavor: input.MgmtFlavor,
298+
Namespace: managementClusterNamespace.Name,
299+
ClusterName: managementClusterName,
300+
KubernetesVersion: initKubernetesVersion,
301+
ControlPlaneMachineCount: ptr.To[int64](1),
302+
WorkerMachineCount: ptr.To[int64](1),
303+
},
304+
PreWaitForCluster: func() {
305+
if input.PreWaitForCluster != nil {
306+
input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
307+
}
308+
},
309+
CNIManifestPath: input.CNIManifestPath,
310+
ControlPlaneWaiters: input.ControlPlaneWaiters,
311+
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
312+
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
313+
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
314+
}, managementClusterResources)
315+
316+
// If the cluster is a DockerCluster, we should load controller images into the nodes.
317+
// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
318+
// this approach because this allows to have a single source of truth for images, the e2e config
319+
// Nb. the images for official version of the providers will be pulled from internet, but the latest images must be
320+
// built locally and loaded into kind
321+
cluster := managementClusterResources.Cluster
322+
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
323+
Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
324+
Name: cluster.Name,
325+
Images: input.E2EConfig.Images,
326+
})).To(Succeed())
327+
}
328+
329+
// Get a ClusterProxy so we can interact with the workload cluster
330+
managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
287331
}
288332

289-
// Get a ClusterProxy so we can interact with the workload cluster
290-
managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
333+
By("Turning the new cluster into a management cluster with older versions of providers")
291334

292335
// Download the clusterctl version that should be used to initially set up the management cluster (which is later upgraded).
293336
Byf("Downloading clusterctl binary from %s", initClusterctlBinaryURL)
294337
clusterctlBinaryPath, clusterctlConfigPath := setupClusterctl(ctx, initClusterctlBinaryURL, input.ClusterctlConfigPath)
295338
defer os.Remove(clusterctlBinaryPath) // clean up
296339

297-
By("Initializing the workload cluster with older versions of providers")
340+
By("Initializing the new management cluster with older versions of providers")
298341

299342
if input.PreInit != nil {
300-
By("Running Pre-init steps against the management cluster")
343+
By("Running Pre-init steps against the new management cluster")
301344
input.PreInit(managementClusterProxy)
302345
}
303346

@@ -340,7 +383,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
340383
IPAMProviders: ipamProviders,
341384
RuntimeExtensionProviders: runtimeExtensionProviders,
342385
AddonProviders: addonProviders,
343-
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
386+
LogFolder: managementClusterLogFolder,
344387
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
345388

346389
By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
@@ -364,7 +407,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
364407
// In this case ApplyClusterTemplateAndWait can't be used because this helper is linked to the last version of the API;
365408
// so we are getting a template using the downloaded version of clusterctl, applying it, and wait for machines to be provisioned.
366409

367-
workloadClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
410+
workloadClusterName = fmt.Sprintf("%s-workload-%s", specName, util.RandomString(6))
368411
workloadClusterNamespace := testNamespace.Name
369412
kubernetesVersion := input.WorkloadKubernetesVersion
370413
if kubernetesVersion == "" {
@@ -532,7 +575,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
532575
IPAMProviders: upgrade.IPAMProviders,
533576
RuntimeExtensionProviders: upgrade.RuntimeExtensionProviders,
534577
AddonProviders: upgrade.AddonProviders,
535-
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
578+
LogFolder: managementClusterLogFolder,
536579
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
537580
} else {
538581
Byf("[%d] Upgrading providers to the latest version available", i)
@@ -542,7 +585,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
542585
ClusterctlVariables: input.UpgradeClusterctlVariables,
543586
ClusterProxy: managementClusterProxy,
544587
Contract: upgrade.Contract,
545-
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
588+
LogFolder: managementClusterLogFolder,
546589
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
547590
}
548591

@@ -694,8 +737,14 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
694737
By("Running PreCleanupManagementCluster steps against the management cluster")
695738
input.PreCleanupManagementCluster(managementClusterProxy)
696739
}
740+
697741
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
698-
framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
742+
if input.UseKindForManagementCluster {
743+
managementClusterProxy.Dispose(ctx)
744+
managementClusterProvider.Dispose(ctx)
745+
} else {
746+
framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
747+
}
699748
})
700749
}
701750

‎test/e2e/clusterctl_upgrade_test.go

+33-26
Original file line numberDiff line numberDiff line change
@@ -104,8 +104,9 @@ var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func(
104104
UpgradeClusterctlVariables: map[string]string{
105105
"CLUSTER_TOPOLOGY": "false",
106106
},
107-
MgmtFlavor: "topology",
108-
WorkloadFlavor: "",
107+
MgmtFlavor: "topology",
108+
WorkloadFlavor: "",
109+
UseKindForManagementCluster: true,
109110
}
110111
})
111112
})
@@ -165,10 +166,11 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func(
165166
},
166167
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v0.4/bases.
167168
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
168-
InitWithKubernetesVersion: "v1.23.17",
169-
WorkloadKubernetesVersion: "v1.23.17",
170-
MgmtFlavor: "topology",
171-
WorkloadFlavor: "",
169+
InitWithKubernetesVersion: "v1.23.17",
170+
WorkloadKubernetesVersion: "v1.23.17",
171+
MgmtFlavor: "topology",
172+
WorkloadFlavor: "",
173+
UseKindForManagementCluster: true,
172174
}
173175
})
174176
})
@@ -199,10 +201,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() {
199201
InitWithRuntimeExtensionProviders: []string{},
200202
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.0/bases.
201203
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
202-
InitWithKubernetesVersion: "v1.23.17",
203-
WorkloadKubernetesVersion: "v1.23.17",
204-
MgmtFlavor: "topology",
205-
WorkloadFlavor: "",
204+
InitWithKubernetesVersion: "v1.23.17",
205+
WorkloadKubernetesVersion: "v1.23.17",
206+
MgmtFlavor: "topology",
207+
WorkloadFlavor: "",
208+
UseKindForManagementCluster: true,
206209
}
207210
})
208211
})
@@ -229,10 +232,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() {
229232
InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease)},
230233
InitWithProvidersContract: "v1beta1",
231234
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
232-
InitWithKubernetesVersion: "v1.28.0",
233-
WorkloadKubernetesVersion: "v1.28.0",
234-
MgmtFlavor: "topology",
235-
WorkloadFlavor: "",
235+
InitWithKubernetesVersion: "v1.28.0",
236+
WorkloadKubernetesVersion: "v1.28.0",
237+
MgmtFlavor: "topology",
238+
WorkloadFlavor: "",
239+
UseKindForManagementCluster: true,
236240
}
237241
})
238242
})
@@ -259,10 +263,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.5=>cur
259263
InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease)},
260264
InitWithProvidersContract: "v1beta1",
261265
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
262-
InitWithKubernetesVersion: "v1.28.0",
263-
WorkloadKubernetesVersion: "v1.28.0",
264-
MgmtFlavor: "topology",
265-
WorkloadFlavor: "topology",
266+
InitWithKubernetesVersion: "v1.28.0",
267+
WorkloadKubernetesVersion: "v1.28.0",
268+
MgmtFlavor: "topology",
269+
WorkloadFlavor: "topology",
270+
UseKindForManagementCluster: true,
266271
}
267272
})
268273
})
@@ -283,10 +288,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.6=>current)", func() {
283288
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease),
284289
InitWithProvidersContract: "v1beta1",
285290
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
286-
InitWithKubernetesVersion: "v1.29.2",
287-
WorkloadKubernetesVersion: "v1.29.2",
288-
MgmtFlavor: "topology",
289-
WorkloadFlavor: "",
291+
InitWithKubernetesVersion: "v1.29.2",
292+
WorkloadKubernetesVersion: "v1.29.2",
293+
MgmtFlavor: "topology",
294+
WorkloadFlavor: "",
295+
UseKindForManagementCluster: true,
290296
}
291297
})
292298
})
@@ -307,10 +313,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.6=>cur
307313
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease),
308314
InitWithProvidersContract: "v1beta1",
309315
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
310-
InitWithKubernetesVersion: "v1.29.2",
311-
WorkloadKubernetesVersion: "v1.29.2",
312-
MgmtFlavor: "topology",
313-
WorkloadFlavor: "topology",
316+
InitWithKubernetesVersion: "v1.29.2",
317+
WorkloadKubernetesVersion: "v1.29.2",
318+
MgmtFlavor: "topology",
319+
WorkloadFlavor: "topology",
320+
UseKindForManagementCluster: true,
314321
}
315322
})
316323
})

0 commit comments

Comments
 (0)