Skip to content

Commit 911ad53

Browse files
authored
Merge pull request #6625 from sbueringer/pr-runtime-sdk-e2e-test
✨ RuntimeSDK: Add Test Extension and initial e2e test
2 parents 8a6b4c6 + 8037804 commit 911ad53

26 files changed

+994
-1
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,9 @@ test/e2e/data/infrastructure-docker/v1alpha3/cluster-template*.yaml
1515
test/e2e/data/infrastructure-docker/v1alpha4/cluster-template*.yaml
1616
test/e2e/data/infrastructure-docker/v1beta1/cluster-template*.yaml
1717

18+
# E2e test extension deployment
19+
test/e2e/data/test-extension/deployment.yaml
20+
1821
# Output of the go coverage tool, specifically when used with LiteIDE
1922
*.out
2023

Makefile

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -162,6 +162,10 @@ CLUSTERCTL_MANIFEST_DIR := cmd/clusterctl/config
162162
CLUSTERCTL_IMAGE_NAME ?= clusterctl
163163
CLUSTERCTL_IMG ?= $(REGISTRY)/$(CLUSTERCTL_IMAGE_NAME)
164164

165+
# test extension
166+
TEST_EXTENSION_IMAGE_NAME ?= test-extension
167+
TEST_EXTENSION_IMG ?= $(REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME)
168+
165169
# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
166170

167171
TAG ?= dev
@@ -554,6 +558,12 @@ docker-build-clusterctl: ## Build the docker image for clusterctl with output bi
554558
.PHONY: docker-capd-build-all
555559
docker-capd-build-all: $(addprefix docker-capd-build-,$(ALL_ARCH)) ## Build capd docker images for all architectures
556560

561+
.PHONY: docker-build-test-extension
562+
docker-build-test-extension: ## Build the docker image for the test extension
563+
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file ./test/extension/Dockerfile
564+
$(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./test/extension/config/default/extension_image_patch.yaml"
565+
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./test/extension/config/default/extension_pull_policy.yaml"
566+
557567
.PHONY: e2e-framework
558568
e2e-framework: ## Builds the CAPI e2e framework
559569
cd $(E2E_FRAMEWORK_DIR); go build ./...

scripts/ci-e2e-lib.sh

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,14 @@ capi:buildDockerImages () {
3939
else
4040
echo "+ CAPD images already present in the system, skipping make"
4141
fi
42+
43+
## Build test extension images, if missing
44+
if [[ "$(docker images -q "$REGISTRY/test-extension-$ARCH:$TAG" 2> /dev/null)" == "" ]]; then
45+
echo "+ Building test-extension image"
46+
make docker-build-test-extension
47+
else
48+
echo "+ test-extension image already present in the system, skipping make"
49+
fi
4250
}
4351

4452
# k8s::prepareKindestImages checks all the e2e test variables representing a Kubernetes version,

test/e2e/Makefile

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,11 +95,17 @@ cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta
9595
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-node-drain --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-node-drain.yaml
9696
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
9797
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-cgroupfs.yaml
98+
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk.yaml
99+
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs.yaml
98100
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml
99101
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ipv6.yaml
100102
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-topology.yaml
101103
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ignition.yaml
102104

105+
test-extension-deployment: $(KUSTOMIZE) ## Generate deployment for test extension
106+
mkdir -p $(REPO_ROOT)/test/e2e/data/test-extension
107+
$(KUSTOMIZE) build $(REPO_ROOT)/test/extension/config/default > $(REPO_ROOT)/test/e2e/data/test-extension/deployment.yaml
108+
103109
## --------------------------------------
104110
## Testing
105111
## --------------------------------------
@@ -119,7 +125,7 @@ _SKIP_ARGS := $(foreach arg,$(strip $(GINKGO_SKIP)),-skip="$(arg)")
119125
endif
120126

121127
.PHONY: run
122-
run: $(GINKGO) cluster-templates ## Run the end-to-end tests
128+
run: $(GINKGO) cluster-templates test-extension-deployment ## Run the end-to-end tests
123129
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
124130
-e2e.artifacts-folder="$(ARTIFACTS)" \
125131
-e2e.config="$(E2E_CONF_FILE)" \

test/e2e/cluster_upgrade.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ type ClusterUpgradeConformanceSpecInput struct {
4545
// ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test.
4646
// Default is 1.
4747
ControlPlaneMachineCount *int64
48+
4849
// WorkerMachineCount is used in `config cluster` to configure the count of the worker machines used in the test.
4950
// NOTE: If the WORKER_MACHINE_COUNT var is used multiple times in the cluster template, the absolute count of
5051
// worker machines is a multiple of WorkerMachineCount.
@@ -66,6 +67,7 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust
6667
kubetestConfigurationVariable = "KUBETEST_CONFIGURATION"
6768
specName = "k8s-upgrade-and-conformance"
6869
)
70+
6971
var (
7072
input ClusterUpgradeConformanceSpecInput
7173
namespace *corev1.Namespace
Lines changed: 236 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,236 @@
1+
/*
2+
Copyright 2021 The Kubernetes Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package e2e
18+
19+
import (
20+
"context"
21+
"fmt"
22+
"os"
23+
"path/filepath"
24+
"strings"
25+
"time"
26+
27+
. "github.com/onsi/ginkgo"
28+
. "github.com/onsi/gomega"
29+
corev1 "k8s.io/api/core/v1"
30+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
31+
"k8s.io/utils/pointer"
32+
33+
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
34+
"sigs.k8s.io/cluster-api/test/framework"
35+
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
36+
"sigs.k8s.io/cluster-api/util"
37+
)
38+
39+
// clusterUpgradeWithRuntimeSDKSpecInput is the input for clusterUpgradeWithRuntimeSDKSpec.
type clusterUpgradeWithRuntimeSDKSpecInput struct {
	// E2EConfig is the e2e test configuration.
	E2EConfig *clusterctl.E2EConfig
	// ClusterctlConfigPath is the path to the clusterctl config file.
	ClusterctlConfigPath string
	// BootstrapClusterProxy provides access to the bootstrap (management) cluster.
	BootstrapClusterProxy framework.ClusterProxy
	// ArtifactFolder is the folder where test artifacts (logs, resources) are dumped.
	ArtifactFolder string
	// SkipCleanup, if true, leaves the created resources in place after the spec completes.
	SkipCleanup bool

	// ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test.
	// Default is 1.
	ControlPlaneMachineCount *int64

	// WorkerMachineCount is used in `config cluster` to configure the count of the worker machines used in the test.
	// NOTE: If the WORKER_MACHINE_COUNT var is used multiple times in the cluster template, the absolute count of
	// worker machines is a multiple of WorkerMachineCount.
	// Default is 2.
	WorkerMachineCount *int64

	// Flavor to use when creating the cluster for testing, "upgrades" is used if not specified.
	Flavor *string
}
60+
61+
// clusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite.
62+
// Upgrading a cluster refers to upgrading the control-plane and worker nodes (managed by MD and machine pools).
63+
// NOTE: This test only works with a KubeadmControlPlane.
64+
// NOTE: This test works with Clusters with and without ClusterClass.
65+
// When using ClusterClass the ClusterClass must have the variables "etcdImageTag" and "coreDNSImageTag" of type string.
66+
// Those variables should have corresponding patches which set the etcd and CoreDNS tags in KCP.
67+
func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() clusterUpgradeWithRuntimeSDKSpecInput) {
68+
const (
69+
textExtensionPathVariable = "TEST_EXTENSION"
70+
specName = "k8s-upgrade-with-runtimesdk"
71+
)
72+
73+
var (
74+
input clusterUpgradeWithRuntimeSDKSpecInput
75+
namespace *corev1.Namespace
76+
ext *runtimev1.ExtensionConfig
77+
cancelWatches context.CancelFunc
78+
79+
controlPlaneMachineCount int64
80+
workerMachineCount int64
81+
82+
clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
83+
testExtensionPath string
84+
)
85+
86+
BeforeEach(func() {
87+
Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
88+
input = inputGetter()
89+
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
90+
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
91+
Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
92+
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
93+
94+
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom))
95+
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo))
96+
Expect(input.E2EConfig.Variables).To(HaveKey(EtcdVersionUpgradeTo))
97+
Expect(input.E2EConfig.Variables).To(HaveKey(CoreDNSVersionUpgradeTo))
98+
99+
testExtensionPath = input.E2EConfig.GetVariable(textExtensionPathVariable)
100+
Expect(testExtensionPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", textExtensionPathVariable)
101+
102+
if input.ControlPlaneMachineCount == nil {
103+
controlPlaneMachineCount = 1
104+
} else {
105+
controlPlaneMachineCount = *input.ControlPlaneMachineCount
106+
}
107+
108+
if input.WorkerMachineCount == nil {
109+
workerMachineCount = 2
110+
} else {
111+
workerMachineCount = *input.WorkerMachineCount
112+
}
113+
114+
// Setup a Namespace where to host objects for this spec and create a watcher for the Namespace events.
115+
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
116+
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
117+
})
118+
119+
It("Should create and upgrade a workload cluster", func() {
120+
By("Deploy Test Extension")
121+
testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
122+
Expect(err).ToNot(HaveOccurred(), "Failed to read the extension config deployment manifest file")
123+
124+
// Set the SERVICE_NAMESPACE, which is used in the cert-manager Certificate CR.
125+
// We have to dynamically set the namespace here, because it depends on the test run and thus
126+
// cannot be set when rendering the test extension YAML with kustomize.
127+
testExtensionDeployment := strings.ReplaceAll(string(testExtensionDeploymentTemplate), "${SERVICE_NAMESPACE}", namespace.Name)
128+
Expect(testExtensionDeployment).ToNot(BeEmpty(), "Test Extension deployment manifest file should not be empty")
129+
130+
Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(testExtensionDeployment), "--namespace", namespace.Name)).To(Succeed())
131+
132+
By("Deploy Test Extension ExtensionConfig")
133+
ext = extensionConfig(specName, namespace)
134+
err = input.BootstrapClusterProxy.GetClient().Create(ctx, ext)
135+
Expect(err).ToNot(HaveOccurred(), "Failed to create the extension config")
136+
137+
By("Creating a workload cluster")
138+
139+
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
140+
ClusterProxy: input.BootstrapClusterProxy,
141+
ConfigCluster: clusterctl.ConfigClusterInput{
142+
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
143+
ClusterctlConfigPath: input.ClusterctlConfigPath,
144+
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
145+
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
146+
Flavor: pointer.StringDeref(input.Flavor, "upgrades"),
147+
Namespace: namespace.Name,
148+
ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
149+
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
150+
ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
151+
WorkerMachineCount: pointer.Int64Ptr(workerMachineCount),
152+
},
153+
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
154+
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
155+
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
156+
WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
157+
}, clusterResources)
158+
159+
// Upgrade the Cluster topology to run through an entire cluster lifecycle to test the lifecycle hooks.
160+
By("Upgrading the Cluster topology")
161+
framework.UpgradeClusterTopologyAndWaitForUpgrade(ctx, framework.UpgradeClusterTopologyAndWaitForUpgradeInput{
162+
ClusterProxy: input.BootstrapClusterProxy,
163+
Cluster: clusterResources.Cluster,
164+
ControlPlane: clusterResources.ControlPlane,
165+
EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
166+
DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
167+
MachineDeployments: clusterResources.MachineDeployments,
168+
KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
169+
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
170+
WaitForKubeProxyUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
171+
WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
172+
WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
173+
})
174+
175+
// Only attempt to upgrade MachinePools if they were provided in the template.
176+
if len(clusterResources.MachinePools) > 0 && workerMachineCount > 0 {
177+
By("Upgrading the machinepool instances")
178+
framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
179+
ClusterProxy: input.BootstrapClusterProxy,
180+
Cluster: clusterResources.Cluster,
181+
UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
182+
WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
183+
MachinePools: clusterResources.MachinePools,
184+
})
185+
}
186+
187+
By("Waiting until nodes are ready")
188+
workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name)
189+
workloadClient := workloadProxy.GetClient()
190+
framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{
191+
Lister: workloadClient,
192+
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
193+
Count: int(clusterResources.ExpectedTotalNodes()),
194+
WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
195+
})
196+
197+
By("PASSED!")
198+
})
199+
200+
AfterEach(func() {
201+
// Dumps all the resources in the spec Namespace, then cleanups the cluster object and the spec Namespace itself.
202+
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
203+
204+
Eventually(func() error {
205+
return input.BootstrapClusterProxy.GetClient().Delete(ctx, ext)
206+
}, 10*time.Second, 1*time.Second).Should(Succeed())
207+
})
208+
}
209+
210+
// extensionConfig generates an ExtensionConfig.
211+
// We make sure this cluster-wide object does not conflict with others by using a random generated
212+
// name and a NamespaceSelector selecting on the namespace of the current test.
213+
// Thus, this object is "namespaced" to the current test even though it's a cluster-wide object.
214+
func extensionConfig(specName string, namespace *corev1.Namespace) *runtimev1.ExtensionConfig {
215+
return &runtimev1.ExtensionConfig{
216+
ObjectMeta: metav1.ObjectMeta{
217+
Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
218+
Annotations: map[string]string{
219+
"cert-manager.io/inject-ca-from-secret": fmt.Sprintf("%s/webhook-service-cert", namespace.Name),
220+
},
221+
},
222+
Spec: runtimev1.ExtensionConfigSpec{
223+
ClientConfig: runtimev1.ClientConfig{
224+
Service: &runtimev1.ServiceReference{
225+
Name: "webhook-service",
226+
Namespace: namespace.Name,
227+
},
228+
},
229+
NamespaceSelector: &metav1.LabelSelector{
230+
MatchLabels: map[string]string{
231+
"kubernetes.io/metadata.name:": namespace.Name,
232+
},
233+
},
234+
},
235+
}
236+
}
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
//go:build e2e
2+
// +build e2e
3+
4+
/*
5+
Copyright 2021 The Kubernetes Authors.
6+
7+
Licensed under the Apache License, Version 2.0 (the "License");
8+
you may not use this file except in compliance with the License.
9+
You may obtain a copy of the License at
10+
11+
http://www.apache.org/licenses/LICENSE-2.0
12+
13+
Unless required by applicable law or agreed to in writing, software
14+
distributed under the License is distributed on an "AS IS" BASIS,
15+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
See the License for the specific language governing permissions and
17+
limitations under the License.
18+
*/
19+
20+
package e2e
21+
22+
import (
23+
"github.com/blang/semver"
24+
. "github.com/onsi/ginkgo"
25+
. "github.com/onsi/gomega"
26+
"k8s.io/utils/pointer"
27+
)
28+
29+
var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [PR-Informing] [ClusterClass]", func() {
30+
clusterUpgradeWithRuntimeSDKSpec(ctx, func() clusterUpgradeWithRuntimeSDKSpecInput {
31+
// "upgrades" is the same as the "topology" flavor but with an additional MachinePool.
32+
flavor := pointer.String("upgrades-runtimesdk")
33+
// For KubernetesVersionUpgradeFrom < v1.24 we have to use upgrades-cgroupfs flavor.
34+
// This is because kind and CAPD only support:
35+
// * cgroupDriver cgroupfs for Kubernetes < v1.24
36+
// * cgroupDriver systemd for Kubernetes >= v1.24.
37+
// Notes:
38+
// * We always use a ClusterClass-based cluster-template for the upgrade test
39+
// * The ClusterClass will automatically adjust the cgroupDriver for KCP and MDs.
40+
// * We have to handle the MachinePool ourselves
41+
// * The upgrades-cgroupfs flavor uses an MP which is pinned to cgroupfs
42+
// * During the upgrade UpgradeMachinePoolAndWait automatically drops the cgroupfs pinning
43+
// when the target version is >= v1.24.
44+
// TODO: We can remove this after the v1.25 release as we then only test the v1.24=>v1.25 upgrade.
45+
version, err := semver.ParseTolerant(e2eConfig.GetVariable(KubernetesVersionUpgradeFrom))
46+
Expect(err).ToNot(HaveOccurred(), "Invalid argument, KUBERNETES_VERSION_UPGRADE_FROM is not a valid version")
47+
if version.LT(semver.MustParse("1.24.0")) {
48+
// "upgrades-cgroupfs" is the same as the "topology" flavor but with an additional MachinePool
49+
// with pinned cgroupDriver to cgroupfs.
50+
flavor = pointer.String("upgrades-runtimesdk-cgroupfs")
51+
}
52+
53+
return clusterUpgradeWithRuntimeSDKSpecInput{
54+
E2EConfig: e2eConfig,
55+
ClusterctlConfigPath: clusterctlConfigPath,
56+
BootstrapClusterProxy: bootstrapClusterProxy,
57+
ArtifactFolder: artifactFolder,
58+
SkipCleanup: skipCleanup,
59+
Flavor: flavor,
60+
}
61+
})
62+
})

0 commit comments

Comments
 (0)