Skip to content

Commit d92a5c2

Browse files
Test runtime extension integration
Signed-off-by: Danil-Grigorev <[email protected]>
1 parent 5b7d26f commit d92a5c2

File tree

6 files changed

+77
-10
lines changed

6 files changed

+77
-10
lines changed

Makefile

+1
Original file line numberDiff line numberDiff line change
@@ -618,6 +618,7 @@ generate-e2e-templates-main: $(KUSTOMIZE)
618618
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-ignition.yaml
619619
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/clusterclass-quick-start-kcp-only --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/clusterclass-quick-start-kcp-only.yaml
620620
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-cross-ns-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-cross-ns-topology.yaml
621+
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-cross-ns-upgrades-runtimesdk --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-cross-ns-upgrades-runtimesdk.yaml
621622
$(KUSTOMIZE) build $(INMEMORY_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > $(INMEMORY_TEMPLATES)/main/cluster-template.yaml
622623

623624
.PHONY: generate-metrics-config

test/e2e/cluster_upgrade_runtimesdk.go

+26-7
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,9 @@ type ClusterUpgradeWithRuntimeSDKSpecInput struct {
9595
ExtensionServiceNamespace string
9696
// ExtensionServiceName is the name of the service to configure in the test-namespace scoped ExtensionConfig.
9797
ExtensionServiceName string
98+
99+
// ClassNamespace is an optional flag; when true, the ClusterClass is hosted in a separate
// namespace and the Cluster is configured with a cross-namespace ClusterClass reference.
100+
ClassNamespace bool
98101
}
99102

100103
// ClusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite.
@@ -109,9 +112,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
109112
)
110113

111114
var (
112-
input ClusterUpgradeWithRuntimeSDKSpecInput
113-
namespace *corev1.Namespace
114-
cancelWatches context.CancelFunc
115+
input ClusterUpgradeWithRuntimeSDKSpecInput
116+
namespace, infraNamespace *corev1.Namespace
117+
cancelWatches, cancelInfraWatches context.CancelFunc
115118

116119
controlPlaneMachineCount int64
117120
workerMachineCount int64
@@ -148,6 +151,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
148151

149152
// Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events.
150153
namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
154+
infraNamespace, cancelInfraWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
151155
clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
152156
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
153157
})
@@ -162,7 +166,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
162166
By("Deploy Test Extension ExtensionConfig")
163167

164168
Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
165-
extensionConfig(specName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName))).
169+
extensionConfig(specName, input.ExtensionServiceNamespace, input.ExtensionServiceName, namespace.Name, infraNamespace.Name))).
166170
To(Succeed(), "Failed to create the extension config")
167171

168172
By("Creating a workload cluster; creation waits for BeforeClusterCreateHook to gate the operation")
@@ -177,6 +181,11 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
177181
infrastructureProvider = *input.InfrastructureProvider
178182
}
179183

184+
variables := map[string]string{}
185+
if input.ClassNamespace {
186+
variables["CLUSTER_CLASS_NAMESPACE"] = infraNamespace.Name
187+
}
188+
180189
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
181190
ClusterProxy: input.BootstrapClusterProxy,
182191
ConfigCluster: clusterctl.ConfigClusterInput{
@@ -190,6 +199,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
190199
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
191200
ControlPlaneMachineCount: ptr.To[int64](controlPlaneMachineCount),
192201
WorkerMachineCount: ptr.To[int64](workerMachineCount),
202+
ClusterctlVariables: variables,
193203
},
194204
PreWaitForCluster: func() {
195205
beforeClusterCreateTestHandler(ctx,
@@ -304,7 +314,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
304314
if !input.SkipCleanup {
305315
// Delete the extensionConfig first to ensure the BeforeDeleteCluster hook doesn't block deletion.
306316
Eventually(func() error {
307-
return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(specName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName))
317+
return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(specName, input.ExtensionServiceNamespace, input.ExtensionServiceName))
308318
}, 10*time.Second, 1*time.Second).Should(Succeed(), "delete extensionConfig failed")
309319

310320
Byf("Deleting cluster %s", klog.KObj(clusterResources.Cluster))
@@ -322,8 +332,17 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
322332
Deleter: input.BootstrapClusterProxy.GetClient(),
323333
Name: namespace.Name,
324334
})
335+
336+
if input.ClassNamespace {
337+
Byf("Deleting namespace used for optionally hosting the %q infrastructure spec", specName)
338+
framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
339+
Deleter: input.BootstrapClusterProxy.GetClient(),
340+
Name: infraNamespace.Name,
341+
})
342+
}
325343
}
326344
cancelWatches()
345+
cancelInfraWatches()
327346
})
328347
}
329348

@@ -429,7 +448,7 @@ func machineSetPreflightChecksTestHandler(ctx context.Context, c client.Client,
429448
// We make sure this cluster-wide object does not conflict with others by using a random generated
430449
// name and a NamespaceSelector selecting on the namespace of the current test.
431450
// Thus, this object is "namespaced" to the current test even though it's a cluster-wide object.
432-
func extensionConfig(name, namespace, extensionServiceNamespace, extensionServiceName string) *runtimev1.ExtensionConfig {
451+
func extensionConfig(name, extensionServiceNamespace, extensionServiceName string, namespaces ...string) *runtimev1.ExtensionConfig {
433452
return &runtimev1.ExtensionConfig{
434453
ObjectMeta: metav1.ObjectMeta{
435454
// Note: We have to use a constant name here as we have to be able to reference it in the ClusterClass
@@ -454,7 +473,7 @@ func extensionConfig(name, namespace, extensionServiceNamespace, extensionServic
454473
{
455474
Key: "kubernetes.io/metadata.name",
456475
Operator: metav1.LabelSelectorOpIn,
457-
Values: []string{namespace},
476+
Values: namespaces,
458477
},
459478
},
460479
},

test/e2e/cluster_upgrade_runtimesdk_test.go

+33
Original file line numberDiff line numberDiff line change
@@ -60,3 +60,36 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt
6060
}
6161
})
6262
})
63+
64+
var _ = Describe("When upgrading a workload cluster using ClusterClass in a different NS with RuntimeSDK [PR-Blocking] [ClusterClass]", func() {
65+
ClusterUpgradeWithRuntimeSDKSpec(ctx, func() ClusterUpgradeWithRuntimeSDKSpecInput {
66+
version, err := semver.ParseTolerant(e2eConfig.GetVariable(KubernetesVersionUpgradeFrom))
67+
Expect(err).ToNot(HaveOccurred(), "Invalid argument, KUBERNETES_VERSION_UPGRADE_FROM is not a valid version")
68+
if version.LT(semver.MustParse("1.24.0")) {
69+
Fail("This test only supports upgrades from Kubernetes >= v1.24.0")
70+
}
71+
72+
return ClusterUpgradeWithRuntimeSDKSpecInput{
73+
E2EConfig: e2eConfig,
74+
ClusterctlConfigPath: clusterctlConfigPath,
75+
BootstrapClusterProxy: bootstrapClusterProxy,
76+
ArtifactFolder: artifactFolder,
77+
SkipCleanup: skipCleanup,
78+
InfrastructureProvider: ptr.To("docker"),
79+
PostUpgrade: func(proxy framework.ClusterProxy, namespace, clusterName string) {
80+
// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
81+
// continuous reconciles when everything should be stable.
82+
framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
83+
},
84+
// "cross-ns-upgrades-runtimesdk" is the "upgrades-runtimesdk" flavor with the
// ClusterClass referenced from a different namespace (see CLUSTER_CLASS_NAMESPACE).
85+
Flavor: ptr.To("cross-ns-upgrades-runtimesdk"),
86+
ClassNamespace: true,
87+
// The runtime extension gets deployed to the test-extension-system namespace and is exposed
88+
// by the test-extension-webhook-service.
89+
// The below values are used when creating the cluster-wide ExtensionConfig to refer
90+
// the actual service.
91+
ExtensionServiceNamespace: "test-extension-system",
92+
ExtensionServiceName: "test-extension-webhook-service",
93+
}
94+
})
95+
})

test/e2e/config/docker.yaml

+4-3
Original file line numberDiff line numberDiff line change
@@ -348,6 +348,7 @@ providers:
348348
- sourcePath: "../data/infrastructure-docker/main/cluster-template-topology.yaml"
349349
- sourcePath: "../data/infrastructure-docker/main/cluster-template-ignition.yaml"
350350
- sourcePath: "../data/infrastructure-docker/main/cluster-template-cross-ns-topology.yaml"
351+
- sourcePath: "../data/infrastructure-docker/main/cluster-template-cross-ns-upgrades-runtimesdk.yaml"
351352
- sourcePath: "../data/infrastructure-docker/main/clusterclass-quick-start.yaml"
352353
- sourcePath: "../data/infrastructure-docker/main/clusterclass-quick-start-kcp-only.yaml"
353354
- sourcePath: "../data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml"
@@ -377,10 +378,10 @@ variables:
377378
# allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation.
378379
# The following Kubernetes versions should be the latest versions with already published kindest/node images.
379380
# This avoids building node images in the default case which improves the test duration significantly.
380-
KUBERNETES_VERSION_MANAGEMENT: "v1.32.0-beta.0"
381-
KUBERNETES_VERSION: "v1.32.0-beta.0"
381+
KUBERNETES_VERSION_MANAGEMENT: "v1.31.0"
382+
KUBERNETES_VERSION: "v1.31.0"
382383
KUBERNETES_VERSION_UPGRADE_FROM: "v1.31.0"
383-
KUBERNETES_VERSION_UPGRADE_TO: "v1.32.0-beta.0"
384+
KUBERNETES_VERSION_UPGRADE_TO: "v1.31.0"
384385
KUBERNETES_VERSION_LATEST_CI: "ci/latest-1.32"
385386
ETCD_VERSION_UPGRADE_TO: "3.5.16-0"
386387
COREDNS_VERSION_UPGRADE_TO: "v1.11.3"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
---
2+
apiVersion: cluster.x-k8s.io/v1beta1
3+
kind: Cluster
4+
metadata:
5+
name: '${CLUSTER_NAME}'
6+
spec:
7+
topology:
8+
classNamespace: '${CLUSTER_CLASS_NAMESPACE}'
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
resources:
2+
- ../cluster-template-upgrades-runtimesdk
3+
4+
patches:
5+
- path: cluster.yaml

0 commit comments

Comments
 (0)