Skip to content

Commit d94a842

Browse files
committed
Add e2e test to reproduce SSA apiVersion issue - add v1alpha3 test
1 parent abb86cd commit d94a842

File tree

13 files changed

+390
-37
lines changed

13 files changed

+390
-37
lines changed

Makefile

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -562,11 +562,15 @@ generate-doctoc:
562562
TRACE=$(TRACE) ./hack/generate-doctoc.sh
563563

564564
.PHONY: generate-e2e-templates
565-
generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.4 v1.0 v1.5 v1.6 main) ## Generate cluster templates for all versions
565+
generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.3 v0.4 v1.0 v1.5 v1.6 main) ## Generate cluster templates for all versions
566566

567567
DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker
568568
INMEMORY_TEMPLATES := test/e2e/data/infrastructure-inmemory
569569

570+
.PHONY: generate-e2e-templates-v0.3
571+
generate-e2e-templates-v0.3: $(KUSTOMIZE)
572+
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v0.3/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v0.3/cluster-template.yaml
573+
570574
.PHONY: generate-e2e-templates-v0.4
571575
generate-e2e-templates-v0.4: $(KUSTOMIZE)
572576
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v0.4/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v0.4/cluster-template.yaml

test/e2e/clusterctl_upgrade.go

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -481,6 +481,9 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
481481
Expect(err).ToNot(HaveOccurred())
482482

483483
clusterctlUpgradeBinaryPath := ""
484+
// TODO: While it is generally fine to use this clusterctl config for upgrades as well,
485+
// it is not ideal because it points to the latest repositories (e.g. _artifacts/repository/cluster-api/latest/components.yaml)
486+
// For example this means if we upgrade to v1.5 the upgrade won't use the metadata.yaml from v1.5 it will use the one from latest.
484487
clusterctlUpgradeConfigPath := input.ClusterctlConfigPath
485488
if upgrade.WithBinary != "" {
486489
// Download the clusterctl version to be used to upgrade the management cluster
@@ -793,8 +796,12 @@ func calculateExpectedWorkerCount(ctx context.Context, c client.Client, unstruct
793796
}
794797

795798
machinePoolList := &unstructured.UnstructuredList{}
799+
machinePoolGroup := clusterv1.GroupVersion.Group
800+
if coreCAPIStorageVersion == "v1alpha3" {
801+
machinePoolGroup = "exp.cluster.x-k8s.io"
802+
}
796803
machinePoolList.SetGroupVersionKind(schema.GroupVersionKind{
797-
Group: clusterv1.GroupVersion.Group,
804+
Group: machinePoolGroup,
798805
Version: coreCAPIStorageVersion,
799806
Kind: "MachinePoolList",
800807
})

test/e2e/clusterctl_upgrade_test.go

Lines changed: 73 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ package e2e
2121

2222
import (
2323
"fmt"
24+
"runtime"
2425

2526
. "github.com/onsi/ginkgo/v2"
2627
. "github.com/onsi/gomega"
@@ -36,6 +37,73 @@ var (
3637
providerDockerPrefix = "docker:v%s"
3738
)
3839

40+
var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func() {
41+
// Get v0.3 latest stable release
42+
version03 := "0.3"
43+
stableRelease03, err := GetStableReleaseOfMinor(ctx, version03)
44+
Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version03)
45+
clusterctlDownloadURL03 := clusterctlDownloadURL
46+
if runtime.GOOS == "darwin" {
47+
// There is no arm64 binary for v0.3.x, so we'll use the amd64 one.
48+
clusterctlDownloadURL03 = "https://github.com/kubernetes-sigs/cluster-api/releases/download/v%s/clusterctl-darwin-amd64"
49+
}
50+
51+
// Get v1.5 latest stable release
52+
version15 := "1.5"
53+
stableRelease15, err := GetStableReleaseOfMinor(ctx, version15)
54+
Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", version15)
55+
56+
ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
57+
return ClusterctlUpgradeSpecInput{
58+
E2EConfig: e2eConfig,
59+
ClusterctlConfigPath: clusterctlConfigPath,
60+
BootstrapClusterProxy: bootstrapClusterProxy,
61+
ArtifactFolder: artifactFolder,
62+
SkipCleanup: skipCleanup,
63+
InfrastructureProvider: ptr.To("docker"),
64+
// Configuration for the initial provider deployment.
65+
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL03, stableRelease03),
66+
// We have to pin the providers because with `InitWithProvidersContract` the test would
67+
// use the latest version for the contract.
68+
InitWithCoreProvider: fmt.Sprintf(providerCAPIPrefix, stableRelease03),
69+
InitWithBootstrapProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease03)},
70+
InitWithControlPlaneProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease03)},
71+
InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease03)},
72+
// We have to set this to an empty array as clusterctl v0.3 doesn't support
73+
// runtime extension providers. If we don't do this the test will automatically
74+
// try to deploy the latest version of our test-extension from docker.yaml.
75+
InitWithRuntimeExtensionProviders: []string{},
76+
// Configuration for the provider upgrades.
77+
Upgrades: []ClusterctlUpgradeSpecInputUpgrade{
78+
{
79+
// Upgrade to v1.5.
80+
// Note: v1.5 is the highest version we can use as it's the last one
81+
// that is able to upgrade from a v1alpha3 management cluster.
82+
WithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease15),
83+
CoreProvider: fmt.Sprintf(providerCAPIPrefix, stableRelease15),
84+
BootstrapProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease15)},
85+
ControlPlaneProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease15)},
86+
InfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease15)},
87+
},
88+
{ // Upgrade to latest v1beta1.
89+
Contract: clusterv1.GroupVersion.Version,
90+
},
91+
},
92+
// CAPI v0.3.x does not work on Kubernetes >= v1.22.
93+
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v0.3/bases`.
94+
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
95+
InitWithKubernetesVersion: "v1.21.14",
96+
WorkloadKubernetesVersion: "v1.22.17",
97+
// CAPI does not work with Kubernetes < v1.22 if ClusterClass is enabled, so we have to disable it.
98+
UpgradeClusterctlVariables: map[string]string{
99+
"CLUSTER_TOPOLOGY": "false",
100+
},
101+
MgmtFlavor: "topology",
102+
WorkloadFlavor: "",
103+
}
104+
})
105+
})
106+
39107
var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func() {
40108
// Get v0.4 latest stable release
41109
version04 := "0.4"
@@ -58,7 +126,7 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func(
58126
// Configuration for the initial provider deployment.
59127
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease04),
60128
// We have to pin the providers because with `InitWithProvidersContract` the test would
61-
// use the latest version for the contract (which is v1.3.X for v1beta1).
129+
// use the latest version for the contract.
62130
InitWithCoreProvider: fmt.Sprintf(providerCAPIPrefix, stableRelease04),
63131
InitWithBootstrapProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease04)},
64132
InitWithControlPlaneProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease04)},
@@ -69,7 +137,10 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func(
69137
InitWithRuntimeExtensionProviders: []string{},
70138
// Configuration for the provider upgrades.
71139
Upgrades: []ClusterctlUpgradeSpecInputUpgrade{
72-
{ // Upgrade to 1.6.
140+
{
141+
// Upgrade to v1.6.
142+
// Note: v1.6 is the highest version we can use as it's the last one
143+
// that is able to upgrade from a v1alpha4 management cluster.
73144
WithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease16),
74145
CoreProvider: fmt.Sprintf(providerCAPIPrefix, stableRelease16),
75146
BootstrapProviders: []string{fmt.Sprintf(providerKubeadmPrefix, stableRelease16)},

test/e2e/config/docker.yaml

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,15 @@ providers:
3535
- name: cluster-api
3636
type: CoreProvider
3737
versions:
38+
- name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
39+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/core-components.yaml"
40+
type: "url"
41+
contract: v1alpha3
42+
replacements:
43+
- old: --metrics-addr=127.0.0.1:8080
44+
new: --metrics-addr=:8080
45+
files:
46+
- sourcePath: "../data/shared/v0.3/metadata.yaml"
3847
- name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
3948
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/core-components.yaml"
4049
type: "url"
@@ -82,6 +91,15 @@ providers:
8291
- name: kubeadm
8392
type: BootstrapProvider
8493
versions:
94+
- name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
95+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/bootstrap-components.yaml"
96+
type: "url"
97+
contract: v1alpha3
98+
replacements:
99+
- old: --metrics-addr=127.0.0.1:8080
100+
new: --metrics-addr=:8080
101+
files:
102+
- sourcePath: "../data/shared/v0.3/metadata.yaml"
85103
- name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
86104
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/bootstrap-components.yaml"
87105
type: "url"
@@ -129,6 +147,15 @@ providers:
129147
- name: kubeadm
130148
type: ControlPlaneProvider
131149
versions:
150+
- name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
151+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/control-plane-components.yaml"
152+
type: "url"
153+
contract: v1alpha3
154+
replacements:
155+
- old: --metrics-addr=127.0.0.1:8080
156+
new: --metrics-addr=:8080
157+
files:
158+
- sourcePath: "../data/shared/v0.3/metadata.yaml"
132159
- name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
133160
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/control-plane-components.yaml"
134161
type: "url"
@@ -176,6 +203,16 @@ providers:
176203
- name: docker
177204
type: InfrastructureProvider
178205
versions:
206+
- name: "{go://sigs.k8s.io/cluster-api@v0.3}" # latest published release in the v1alpha3 series; this is used for v1alpha3 --> v1beta1 clusterctl upgrades test only.
207+
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.3}/infrastructure-components-development.yaml"
208+
type: "url"
209+
contract: v1alpha3
210+
replacements:
211+
- old: --metrics-addr=127.0.0.1:8080
212+
new: --metrics-addr=:8080
213+
files:
214+
- sourcePath: "../data/shared/v0.3/metadata.yaml"
215+
- sourcePath: "../data/infrastructure-docker/v0.3/cluster-template.yaml"
179216
- name: "{go://sigs.k8s.io/cluster-api@v0.4}" # latest published release in the v1alpha4 series; this is used for v1alpha4 --> v1beta1 clusterctl upgrades test only.
180217
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v0.4}/infrastructure-components-development.yaml"
181218
type: "url"
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
---
2+
# DockerCluster object referenced by the Cluster object
3+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
4+
kind: DockerCluster
5+
metadata:
6+
name: '${CLUSTER_NAME}'
7+
---
8+
# Cluster object with
9+
# - Reference to the KubeadmControlPlane object
10+
# - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet.
11+
apiVersion: cluster.x-k8s.io/v1alpha3
12+
kind: Cluster
13+
metadata:
14+
name: '${CLUSTER_NAME}'
15+
labels:
16+
cni: "${CLUSTER_NAME}-crs-0"
17+
spec:
18+
clusterNetwork:
19+
services:
20+
cidrBlocks: ['${DOCKER_SERVICE_CIDRS}']
21+
pods:
22+
cidrBlocks: ['${DOCKER_POD_CIDRS}']
23+
serviceDomain: '${DOCKER_SERVICE_DOMAIN}'
24+
infrastructureRef:
25+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
26+
kind: DockerCluster
27+
name: '${CLUSTER_NAME}'
28+
controlPlaneRef:
29+
kind: KubeadmControlPlane
30+
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
31+
name: "${CLUSTER_NAME}-control-plane"
32+
---
33+
# DockerMachineTemplate object referenced by the KubeadmControlPlane object
34+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
35+
kind: DockerMachineTemplate
36+
metadata:
37+
name: "${CLUSTER_NAME}-control-plane"
38+
spec:
39+
template:
40+
spec:
41+
# NOTE: If the Kubernetes version is changed in `clusterctl_upgrade_test.go` the image and SHA must be updated here.
42+
customImage: "kindest/node:v1.22.17@sha256:9af784f45a584f6b28bce2af84c494d947a05bd709151466489008f80a9ce9d5"
43+
extraMounts:
44+
- containerPath: "/var/run/docker.sock"
45+
hostPath: "/var/run/docker.sock"
46+
---
47+
# KubeadmControlPlane referenced by the Cluster object with
48+
# - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test.
49+
kind: KubeadmControlPlane
50+
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
51+
metadata:
52+
name: "${CLUSTER_NAME}-control-plane"
53+
labels:
54+
kcp-adoption.step2: ""
55+
spec:
56+
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
57+
infrastructureTemplate:
58+
kind: DockerMachineTemplate
59+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
60+
name: "${CLUSTER_NAME}-control-plane"
61+
kubeadmConfigSpec:
62+
clusterConfiguration:
63+
controllerManager:
64+
extraArgs: {enable-hostpath-provisioner: 'true'}
65+
apiServer:
66+
# host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied.
67+
certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal]
68+
initConfiguration:
69+
nodeRegistration:
70+
criSocket: unix:///var/run/containerd/containerd.sock
71+
kubeletExtraArgs:
72+
# We have to pin the cgroupDriver to cgroupfs for Kubernetes < v1.24 because kind does not support systemd for those versions, but kubeadm >= 1.21 defaults to systemd.
73+
# This cluster is used in tests where the Kubernetes version is < 1.24
74+
cgroup-driver: cgroupfs
75+
eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
76+
fail-swap-on: "false"
77+
joinConfiguration:
78+
nodeRegistration:
79+
criSocket: unix:///var/run/containerd/containerd.sock
80+
kubeletExtraArgs:
81+
# We have to pin the cgroupDriver to cgroupfs for Kubernetes < v1.24 because kind does not support systemd for those versions, but kubeadm >= 1.21 defaults to systemd.
82+
# This cluster is used in tests where the Kubernetes version is < 1.24
83+
cgroup-driver: cgroupfs
84+
eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
85+
fail-swap-on: "false"
86+
version: "${KUBERNETES_VERSION}"
Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
---
2+
# ConfigMap object referenced by the ClusterResourceSet object and with
3+
# the CNI resource defined in the test config file
4+
apiVersion: v1
5+
kind: ConfigMap
6+
metadata:
7+
name: "cni-${CLUSTER_NAME}-crs-0"
8+
data: ${CNI_RESOURCES}
9+
binaryData:
10+
---
11+
# ClusterResourceSet object with
12+
# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0
13+
apiVersion: addons.cluster.x-k8s.io/v1alpha3
14+
kind: ClusterResourceSet
15+
metadata:
16+
name: "${CLUSTER_NAME}-crs-0"
17+
spec:
18+
strategy: ApplyOnce
19+
clusterSelector:
20+
matchLabels:
21+
cni: "${CLUSTER_NAME}-crs-0"
22+
resources:
23+
- name: "cni-${CLUSTER_NAME}-crs-0"
24+
kind: ConfigMap
Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
---
2+
# DockerMachineTemplate referenced by the MachineDeployment and with
3+
# - extraMounts for the docker sock, thus allowing self-hosting test
4+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
5+
kind: DockerMachineTemplate
6+
metadata:
7+
name: "${CLUSTER_NAME}-md-0"
8+
spec:
9+
template:
10+
spec:
11+
# NOTE: If the Kubernetes version is changed in `clusterctl_upgrade_test.go` the image and SHA must be updated here.
12+
customImage: "kindest/node:v1.22.17@sha256:9af784f45a584f6b28bce2af84c494d947a05bd709151466489008f80a9ce9d5"
13+
extraMounts:
14+
- containerPath: "/var/run/docker.sock"
15+
hostPath: "/var/run/docker.sock"
16+
---
17+
# KubeadmConfigTemplate referenced by the MachineDeployment
18+
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
19+
kind: KubeadmConfigTemplate
20+
metadata:
21+
name: "${CLUSTER_NAME}-md-0"
22+
spec:
23+
template:
24+
spec:
25+
joinConfiguration:
26+
nodeRegistration:
27+
criSocket: unix:///var/run/containerd/containerd.sock
28+
kubeletExtraArgs:
29+
eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
30+
---
31+
# MachineDeployment object with
32+
# - the label nodepool=pool1 that applies to all the machines, so those machine can be targeted by the MachineHealthCheck object
33+
apiVersion: cluster.x-k8s.io/v1alpha3
34+
kind: MachineDeployment
35+
metadata:
36+
name: "${CLUSTER_NAME}-md-0"
37+
spec:
38+
clusterName: "${CLUSTER_NAME}"
39+
replicas: ${WORKER_MACHINE_COUNT}
40+
selector:
41+
matchLabels:
42+
template:
43+
metadata:
44+
labels:
45+
"nodepool": "pool1"
46+
spec:
47+
clusterName: "${CLUSTER_NAME}"
48+
version: "${KUBERNETES_VERSION}"
49+
bootstrap:
50+
configRef:
51+
name: "${CLUSTER_NAME}-md-0"
52+
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
53+
kind: KubeadmConfigTemplate
54+
infrastructureRef:
55+
name: "${CLUSTER_NAME}-md-0"
56+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
57+
kind: DockerMachineTemplate
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
bases:
2+
- ../bases/cluster-with-kcp.yaml
3+
- ../bases/md.yaml
4+
- ../bases/crs.yaml

test/e2e/data/shared/main/metadata.yaml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,4 +27,7 @@ releaseSeries:
2727
contract: v1beta1
2828
- major: 0
2929
minor: 4
30-
contract: v1alpha4
30+
contract: v1alpha4
31+
- major: 0
32+
minor: 3
33+
contract: v1alpha3

0 commit comments

Comments
 (0)