diff --git a/Makefile b/Makefile
index aee65b29c96..fd69dc742ad 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,7 @@ GO_INSTALL = ./scripts/go_install.sh
 E2E_DATA_DIR ?= $(ROOT_DIR)/test/e2e/data
 KUBETEST_CONF_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/conformance.yaml)
 KUBETEST_WINDOWS_CONF_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/upstream-windows.yaml)
-KUBETEST_REPO_LIST_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/repo-list.yaml)
+KUBETEST_REPO_LIST_PATH ?= $(abspath $(E2E_DATA_DIR)/kubetest/)
 AZURE_TEMPLATES := $(E2E_DATA_DIR)/infrastructure-azure
 # set --output-base used for conversion-gen which needs to be different for in GOPATH and outside GOPATH dev
@@ -130,7 +130,7 @@ E2E_CONF_FILE ?= $(ROOT_DIR)/test/e2e/config/azure-dev.yaml
 E2E_CONF_FILE_ENVSUBST := $(ROOT_DIR)/test/e2e/config/azure-dev-envsubst.yaml
 SKIP_CLEANUP ?= false
 SKIP_CREATE_MGMT_CLUSTER ?= false
-WIN_REPO_LIST ?= https://raw.githubusercontent.com/kubernetes-sigs/windows-testing/master/images/image-repo-list
+WIN_REPO_URL ?=

 # Build time versioning details.
 LDFLAGS := $(shell hack/version.sh)
@@ -202,8 +202,10 @@ test-conformance-fast: ## Run conformance test on workload cluster using a subse
 .PHONY: test-windows-upstream
 test-windows-upstream: ## Run windows upstream tests on workload cluster.
-	curl --retry $(CURL_RETRIES) $(WIN_REPO_LIST) -o $(KUBETEST_REPO_LIST_PATH)
-	$(MAKE) test-conformance CONFORMANCE_E2E_ARGS="-kubetest.config-file=$(KUBETEST_WINDOWS_CONF_PATH) -kubetest.repo-list-file=$(KUBETEST_REPO_LIST_PATH) $(E2E_ARGS)"
+ifneq ($(WIN_REPO_URL), )
+	curl --retry $(CURL_RETRIES) $(WIN_REPO_URL) -o $(KUBETEST_REPO_LIST_PATH)/custom-repo-list.yaml
+endif
+	$(MAKE) test-conformance CONFORMANCE_E2E_ARGS="-kubetest.config-file=$(KUBETEST_WINDOWS_CONF_PATH) -kubetest.repo-list-path=$(KUBETEST_REPO_LIST_PATH) $(E2E_ARGS)"

 $(KUBE_APISERVER) $(ETCD): ## install test asset kubectl, kube-apiserver, etcd
 	source ./scripts/fetch_ext_bins.sh && fetch_tools
diff --git a/scripts/ci-build-kubernetes.sh b/scripts/ci-build-kubernetes.sh
index 1df8256a41c..9a8c9a0e557 100755
--- a/scripts/ci-build-kubernetes.sh
+++ b/scripts/ci-build-kubernetes.sh
@@ -38,6 +38,7 @@ source "${REPO_ROOT}/hack/parse-prow-creds.sh"
 : "${JOB_NAME:?Environment variable empty or not defined.}"

 declare -a BINARIES=("kubeadm" "kubectl" "kubelet")
+declare -a WINDOWS_BINARIES=("kubeadm" "kubectl" "kubelet" "kube-proxy")
 declare -a IMAGES=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler")

 setup() {
@@ -100,7 +101,19 @@ main() {
         for BINARY in "${BINARIES[@]}"; do
             az storage blob upload --container-name "${JOB_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}"
         done
-    fi
+
+        if [[ "${WINDOWS:-}" == "true" ]]; then
+            echo "Building Kubernetes Windows binaries"
+
+            for BINARY in "${WINDOWS_BINARIES[@]}"; do
+                "${KUBE_ROOT}"/build/run.sh make WHAT=cmd/"${BINARY}" KUBE_BUILD_PLATFORMS=windows/amd64 KUBE_VERBOSE=0
+            done
+
+            for BINARY in "${WINDOWS_BINARIES[@]}"; do
+                az storage blob upload --container-name "${JOB_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe"
+            done
+        fi
+    fi
 }

 # can_reuse_artifacts returns true if there exists Kubernetes artifacts built from a PR that we can reuse
@@ -117,6 +130,14 @@ can_reuse_artifacts() {
         fi
     done

+    if [[ "${WINDOWS:-}" == "true" ]]; then
+        for BINARY in "${WINDOWS_BINARIES[@]}"; do
+            if [[ "$(az storage blob exists --container-name "${JOB_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists)" == "false" ]]; then
+                echo "false" && return
+            fi
+        done
+    fi
+
     echo "true"
 }
diff --git a/templates/addons/windows/flannel.yaml b/templates/addons/windows/flannel.yaml
index 729d3ce9afc..6edb9f0e314 100644
--- a/templates/addons/windows/flannel.yaml
+++ b/templates/addons/windows/flannel.yaml
@@ -47,7 +47,7 @@ spec:
     rule: 'RunAsAny'
 ---
 kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 rules:
@@ -76,7 +76,7 @@ rules:
   - patch
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 roleRef:
diff --git a/templates/addons/windows/kube-proxy-windows.yaml b/templates/addons/windows/kube-proxy-windows.yaml
index bd087b992d7..0fedffb7e11 100644
--- a/templates/addons/windows/kube-proxy-windows.yaml
+++ b/templates/addons/windows/kube-proxy-windows.yaml
@@ -5,6 +5,12 @@ data:
     mkdir -force /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount
     mkdir -force /host/k/kube-proxy

+    $$CI_VERSION="${CI_VERSION:-}"
+    if($$CI_VERSION -ne "" -And (Test-Path -Path "/host/k/kube-proxy.exe"))
+    {
+      cp -force /host/k/kube-proxy.exe /k/kube-proxy/kube-proxy.exe
+    }
+
     cp -force /k/kube-proxy/* /host/k/kube-proxy
     cp -force /var/lib/kube-proxy/* /host/var/lib/kube-proxy
     cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount #FIXME?
@@ -57,7 +63,7 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: status.podIP
-        image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION}-nanoserver
+        image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-nanoserver
         name: kube-proxy
         volumeMounts:
         - name: wins
diff --git a/templates/cluster-template-aad.yaml b/templates/cluster-template-aad.yaml
index e66cd58f9e4..570c782c4e8 100644
--- a/templates/cluster-template-aad.yaml
+++ b/templates/cluster-template-aad.yaml
@@ -115,6 +115,8 @@ spec:
     mounts:
     - - LABEL=etcd_disk
       - /var/lib/etcddisk
+    postKubeadmCommands: []
+    preKubeadmCommands: []
     useExperimentalRetryJoin: true
   machineTemplate:
     infrastructureRef:
diff --git a/templates/cluster-template-azure-bastion.yaml b/templates/cluster-template-azure-bastion.yaml
index e594c049292..ca20882429b 100644
--- a/templates/cluster-template-azure-bastion.yaml
+++ b/templates/cluster-template-azure-bastion.yaml
@@ -112,6 +112,8 @@ spec:
     mounts:
     - - LABEL=etcd_disk
       - /var/lib/etcddisk
+    postKubeadmCommands: []
+    preKubeadmCommands: []
   machineTemplate:
     infrastructureRef:
       apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
@@ -199,6 +201,7 @@ spec:
             cloud-config: /etc/kubernetes/azure.json
             cloud-provider: azure
           name: '{{ ds.meta_data["local_hostname"] }}'
+      preKubeadmCommands: []
 ---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
 kind: AzureClusterIdentity
diff --git a/templates/cluster-template-ephemeral.yaml b/templates/cluster-template-ephemeral.yaml
index 2376c940560..1e272b5b693 100644
--- a/templates/cluster-template-ephemeral.yaml
+++ b/templates/cluster-template-ephemeral.yaml
@@ -110,6 +110,8 @@ spec:
     mounts:
     - - LABEL=etcd_disk
       - /var/lib/etcddisk
+    postKubeadmCommands: []
+    preKubeadmCommands: []
   machineTemplate:
     infrastructureRef:
       apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
@@ -203,6 +205,7 @@ spec:
             cloud-config: /etc/kubernetes/azure.json
             cloud-provider: azure
           name: '{{ ds.meta_data["local_hostname"] }}'
+      preKubeadmCommands:
[] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/cluster-template-external-cloud-provider.yaml b/templates/cluster-template-external-cloud-provider.yaml index 0a53187d1bc..51335d14060 100644 --- a/templates/cluster-template-external-cloud-provider.yaml +++ b/templates/cluster-template-external-cloud-provider.yaml @@ -111,6 +111,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -198,6 +200,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/cluster-template-ipv6.yaml b/templates/cluster-template-ipv6.yaml index c5b9293116c..39ca16e341f 100644 --- a/templates/cluster-template-ipv6.yaml +++ b/templates/cluster-template-ipv6.yaml @@ -151,6 +151,7 @@ spec: - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf - systemctl restart systemd-resolved + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template-machinepool-multiple-subnets.yaml b/templates/cluster-template-machinepool-multiple-subnets.yaml index b8edcf02c8f..41a65a70acd 100644 --- a/templates/cluster-template-machinepool-multiple-subnets.yaml +++ b/templates/cluster-template-machinepool-multiple-subnets.yaml @@ -117,6 +117,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template-machinepool-system-assigned-identity.yaml b/templates/cluster-template-machinepool-system-assigned-identity.yaml index 1409e38909f..713fe630729 100644 --- a/templates/cluster-template-machinepool-system-assigned-identity.yaml +++ b/templates/cluster-template-machinepool-system-assigned-identity.yaml @@ -106,6 +106,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template-machinepool-user-assigned-identity.yaml b/templates/cluster-template-machinepool-user-assigned-identity.yaml index e29e8b904c9..7201571953b 100644 --- a/templates/cluster-template-machinepool-user-assigned-identity.yaml +++ b/templates/cluster-template-machinepool-user-assigned-identity.yaml @@ -106,6 +106,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template-machinepool-windows.yaml b/templates/cluster-template-machinepool-windows.yaml index 0af068ab8f0..876069e4051 100644 --- a/templates/cluster-template-machinepool-windows.yaml +++ b/templates/cluster-template-machinepool-windows.yaml @@ -127,6 +127,7 @@ spec: - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml - netplan apply + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git 
a/templates/cluster-template-machinepool.yaml b/templates/cluster-template-machinepool.yaml index 8332407287e..b2877869ad1 100644 --- a/templates/cluster-template-machinepool.yaml +++ b/templates/cluster-template-machinepool.yaml @@ -110,6 +110,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template-nat-gateway.yaml b/templates/cluster-template-nat-gateway.yaml index c2e45608236..3d576256fd9 100644 --- a/templates/cluster-template-nat-gateway.yaml +++ b/templates/cluster-template-nat-gateway.yaml @@ -123,6 +123,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -210,6 +212,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/cluster-template-nvidia-gpu.yaml b/templates/cluster-template-nvidia-gpu.yaml index 66ce4cfa804..84fd0dfa9cb 100644 --- a/templates/cluster-template-nvidia-gpu.yaml +++ b/templates/cluster-template-nvidia-gpu.yaml @@ -111,6 +111,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template-private.yaml b/templates/cluster-template-private.yaml index 9805a32ebde..ccb8fe73481 100644 --- a/templates/cluster-template-private.yaml +++ b/templates/cluster-template-private.yaml @@ -213,6 +213,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/cluster-template-system-assigned-identity.yaml b/templates/cluster-template-system-assigned-identity.yaml index 59438bfd045..5b31f689292 100644 --- a/templates/cluster-template-system-assigned-identity.yaml +++ b/templates/cluster-template-system-assigned-identity.yaml @@ -106,6 +106,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -195,3 +197,4 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] diff --git a/templates/cluster-template-user-assigned-identity.yaml b/templates/cluster-template-user-assigned-identity.yaml index 31c399e6889..a1e8bfcf967 100644 --- a/templates/cluster-template-user-assigned-identity.yaml +++ b/templates/cluster-template-user-assigned-identity.yaml @@ -106,6 +106,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -199,3 +201,4 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] diff --git a/templates/cluster-template-windows.yaml b/templates/cluster-template-windows.yaml index 94227e3a3db..45fa9e813db 100644 --- 
a/templates/cluster-template-windows.yaml +++ b/templates/cluster-template-windows.yaml @@ -127,6 +127,7 @@ spec: - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml - netplan apply + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -160,7 +161,7 @@ metadata: namespace: default spec: clusterName: ${CLUSTER_NAME} - replicas: ${WORKER_MACHINE_COUNT} + replicas: ${LINUX_WORKER_MACHINE_COUNT:-1} selector: matchLabels: null template: @@ -232,6 +233,7 @@ spec: - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml - netplan apply + preKubeadmCommands: [] useExperimentalRetryJoin: true --- apiVersion: cluster.x-k8s.io/v1alpha4 diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index cccd48db083..3fe7e4e5989 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -110,6 +110,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -197,6 +199,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/flavors/base/cluster-template.yaml b/templates/flavors/base/cluster-template.yaml index 039d72f6fa8..d53df34681b 100644 --- a/templates/flavors/base/cluster-template.yaml +++ b/templates/flavors/base/cluster-template.yaml @@ -109,6 +109,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + preKubeadmCommands: [] + postKubeadmCommands: [] version: "${KUBERNETES_VERSION}" --- kind: AzureMachineTemplate diff --git a/templates/flavors/default/machine-deployment.yaml b/templates/flavors/default/machine-deployment.yaml index 337f30f97c6..82fd59ec9e4 100644 --- a/templates/flavors/default/machine-deployment.yaml +++ b/templates/flavors/default/machine-deployment.yaml @@ -42,6 +42,7 @@ metadata: spec: template: spec: + preKubeadmCommands: [] joinConfiguration: nodeRegistration: name: '{{ ds.meta_data["local_hostname"] }}' diff --git a/templates/flavors/windows/machine-deployment.yaml b/templates/flavors/windows/machine-deployment.yaml index 14b591ebeeb..12997864ab8 100644 --- a/templates/flavors/windows/machine-deployment.yaml +++ b/templates/flavors/windows/machine-deployment.yaml @@ -5,7 +5,7 @@ metadata: name: "${CLUSTER_NAME}-md-0" spec: clusterName: "${CLUSTER_NAME}" - replicas: ${WORKER_MACHINE_COUNT} + replicas: ${LINUX_WORKER_MACHINE_COUNT:-1} selector: matchLabels: template: @@ -44,6 +44,7 @@ metadata: spec: template: spec: + preKubeadmCommands: [] postKubeadmCommands: # Azures vnet MTU is 1400. 
# When using Flannel VXLAN to avoid packet fragmentation diff --git a/templates/test/ci/cluster-template-prow-ci-version-windows.yaml b/templates/test/ci/cluster-template-prow-ci-version-windows.yaml new file mode 100644 index 00000000000..22806f83f75 --- /dev/null +++ b/templates/test/ci/cluster-template-prow-ci-version-windows.yaml @@ -0,0 +1,1417 @@ +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: Cluster +metadata: + labels: + cni: ${CLUSTER_NAME}-flannel + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.244.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + vnet: + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + extraVolumes: + - hostPath: /etc/kubernetes/azure.json + mountPath: /etc/kubernetes/azure.json + name: cloud-config + readOnly: true + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "true" + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + cluster-name: ${CLUSTER_NAME} + configure-cloud-routes: "false" + v: "4" + extraVolumes: + - hostPath: /etc/kubernetes/azure.json + mountPath: /etc/kubernetes/azure.json + name: cloud-config + readOnly: true + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + kubernetesVersion: ci/${CI_VERSION} + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - content: | + network: + version: 2 + ethernets: + eth0: + mtu: 1400 + match: + macaddress: MACADDRESS + set-name: eth0 + owner: root:root + path: /etc/netplan/60-eth0.yaml + permissions: "0644" + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. 
+ LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + CI_VERSION=${CI_VERSION} + if [[ "$${CI_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p $$CI_DIR + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing CI version $$CI_VERSION" + # Check for semver + if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" + DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + echo 'deb https://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list + apt-get update + # replace . with \. + VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" + PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" + DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION + done + else + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" + wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE" + chmod +x "$$CI_DIR/$$CI_PACKAGE" + mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" + done + systemctl restart kubelet + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" + wget "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" + $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" k8s.gcr.io/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true --short=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + owner: root:root + path: /tmp/kubeadm-bootstrap.sh + permissions: "0744" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: + - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') + - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml + - netplan apply + preKubeadmCommands: + - bash -c /tmp/kubeadm-bootstrap.sh + useExperimentalRetryJoin: true + 
machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + image: + marketplace: + offer: capi + publisher: cncf-upstream + sku: k8s-1dot18dot8-ubuntu-1804 + version: 2020.08.17 + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${LINUX_WORKER_MACHINE_COUNT:-1} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + image: + marketplace: + offer: capi + publisher: cncf-upstream + sku: k8s-1dot18dot8-ubuntu-1804 + version: 2020.08.17 + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + network: + version: 2 + ethernets: + eth0: + mtu: 1400 + match: + macaddress: MACADDRESS + set-name: eth0 + owner: root:root + path: /etc/netplan/60-eth0.yaml + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. 
+ LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + CI_VERSION=${CI_VERSION} + if [[ "$${CI_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p $$CI_DIR + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing CI version $$CI_VERSION" + # Check for semver + if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" + DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + echo 'deb https://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list + apt-get update + # replace . with \. + VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" + PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" + DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION + done + else + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" + wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE" + chmod +x "$$CI_DIR/$$CI_PACKAGE" + mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" + done + systemctl restart kubelet + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" + wget "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" + $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" k8s.gcr.io/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true --short=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + owner: root:root + path: /tmp/kubeadm-bootstrap.sh + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') + - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml + - netplan apply + preKubeadmCommands: + - bash -c /tmp/kubeadm-bootstrap.sh + useExperimentalRetryJoin: true +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: 
bootstrap.cluster.x-k8s.io/v1alpha4 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-win + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-win + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + spec: + image: + marketplace: + offer: capi-windows + publisher: cncf-upstream + sku: k8s-1dot18dot19-windows-2019 + version: 2021.05.17 + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Windows + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-win-azure-json + owner: root:root + path: c:/k/azure.json + permissions: "0644" + - content: | + # required as a work around for Flannel and Wins bugs + # https://github.com/coreos/flannel/issues/1359 + # https://github.com/kubernetes-sigs/sig-windows-tools/issues/103#issuecomment-709426828 + ipmo C:\k\debug\hns.psm1; + New-HnsNetwork -Type Overlay -AddressPrefix "192.168.255.0/30" -Gateway "192.168.255.1" -Name "External" -AdapterName "Ethernet 2" -SubnetPolicies @(@{Type = "VSID"; VSID = 9999; }) + path: C:/create-external-network.ps1 + permissions: "0744" + - content: | + # /tmp is assumed created and required for upstream e2e tests to pass + New-Item -ItemType Directory -Force -Path C:\tmp\ + path: C:/create-temp-folder.ps1 + permissions: "0744" + - content: | + Stop-Service kubelet -Force + + $$CI_VERSION="${CI_VERSION}" + if($$CI_VERSION -ne "") + { + $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") + $$ci_url="https://storage.googleapis.com/k8s-release-dev/ci/$$CI_VERSION/bin/windows/amd64" + foreach ( $$binary in $$binaries ) + { + echo "downloading binary: $$ci_url/$$binary.exe" + curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe" + } + } + + # We are using a VHD that maps to v1.18.19 so the kubeproxy image is already pulled. (pull it just in case) + # Tag it to the ci version. The image knows how to use the copy locally. 
+ docker pull sigwindowstools/kube-proxy:v1.18.19-nanoserver + docker tag sigwindowstools/kube-proxy:v1.18.19-nanoserver "sigwindowstools/kube-proxy:${CI_VERSION/+/_}-nanoserver" + + kubeadm.exe version -o=short + kubectl.exe version --client=true --short=true + kubelet.exe --version + path: C:/replace-k8s-binaries.ps1 + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: c:/k/azure.json + cloud-config: c:/k/azure.json + cloud-provider: azure + pod-infra-container-image: mcr.microsoft.com/oss/kubernetes/pause:1.4.1 + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - nssm set kubelet start SERVICE_AUTO_START + preKubeadmCommands: + - powershell c:/create-external-network.ps1 + - powershell C:/create-temp-folder.ps1 + - powershell C:/replace-k8s-binaries.ps1 + users: + - groups: Administrators + name: capi + sshAuthorizedKeys: + - ${AZURE_SSH_PUBLIC_KEY:=""} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID} + clientSecret: + name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} + namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} + tenantID: ${AZURE_TENANT_ID} + type: ServicePrincipal +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha4 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-flannel + namespace: default +spec: + clusterSelector: + matchLabels: + cni: ${CLUSTER_NAME}-flannel + resources: + - kind: ConfigMap + name: cni-${CLUSTER_NAME}-flannel + strategy: ApplyOnce +--- +apiVersion: v1 +data: + cni: |+ + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' + --- + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + rules: + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
flannel + subjects: + - kind: ServiceAccount + name: flannel + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: flannel + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel + data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan", + "VNI" : 4096, + "Port": 4789 + } + } + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-amd64 + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-amd64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-amd64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-arm64 + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-arm64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-arm64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + 
requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-arm + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-arm + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-arm + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-ppc64le + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - ppc64le + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-ppc64le + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-ppc64le + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + 
add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-s390x + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - s390x + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-s390x + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-s390x + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: kube-flannel-windows-cfg + namespace: kube-system + labels: + tier: node + app: flannel + data: + run.ps1: | + $ErrorActionPreference = "Stop"; + + mkdir -force /host/etc/cni/net.d + mkdir -force /host/etc/kube-flannel + mkdir -force /host/opt/cni/bin + mkdir -force /host/k/flannel + mkdir -force /host/k/flannel/var/run/secrets/kubernetes.io/serviceaccount + + $cniJson = get-content /etc/kube-flannel-windows/cni-conf.json | ConvertFrom-Json + $serviceSubnet = yq r /etc/kubeadm-config/ClusterConfiguration networking.serviceSubnet + $podSubnet = yq r /etc/kubeadm-config/ClusterConfiguration networking.podSubnet + $networkJson = wins cli net get | convertfrom-json + + $cniJson.delegate.policies[0].Value.ExceptionList = $serviceSubnet, $podSubnet + $cniJson.delegate.policies[1].Value.DestinationPrefix = $serviceSubnet + Set-Content -Path /host/etc/cni/net.d/10-flannel.conf ($cniJson | ConvertTo-Json -depth 100) + + cp -force /etc/kube-flannel/net-conf.json /host/etc/kube-flannel + cp -force -recurse /cni/* /host/opt/cni/bin + cp -force /k/flannel/* /host/k/flannel/ + cp -force /kube-proxy/kubeconfig.conf /host/k/flannel/kubeconfig.yml + cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/k/flannel/var/run/secrets/kubernetes.io/serviceaccount/ + wins cli process run --path /k/flannel/setup.exe --args 
"--mode=overlay --interface=Ethernet 2" + wins cli route add --addresses 169.254.169.254 + wins cli process run --path /k/flannel/flanneld.exe --args "--kube-subnet-mgr --kubeconfig-file /k/flannel/kubeconfig.yml" --envs "POD_NAME=$env:POD_NAME POD_NAMESPACE=$env:POD_NAMESPACE" + cni-conf.json: | + { + "name": "flannel.4096", + "cniVersion": "0.3.0", + "type": "flannel", + "capabilities": { + "dns": true + }, + "delegate": { + "type": "win-overlay", + "policies": [ + { + "Name": "EndpointPolicy", + "Value": { + "Type": "OutBoundNAT", + "ExceptionList": [] + } + }, + { + "Name": "EndpointPolicy", + "Value": { + "Type": "ROUTE", + "DestinationPrefix": "", + "NeedEncap": true + } + } + ] + } + } + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-windows-amd64 + labels: + tier: node + app: flannel + namespace: kube-system + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - windows + - key: kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + serviceAccountName: flannel + tolerations: + - operator: Exists + effect: NoSchedule + containers: + - name: kube-flannel + image: sigwindowstools/flannel:v0.13.0-nanoserver + command: + - pwsh + args: + - -file + - /etc/kube-flannel-windows/run.ps1 + volumeMounts: + - name: wins + mountPath: \\.\pipe\rancher_wins + - name: host + mountPath: /host + - name: kube-proxy + mountPath: /kube-proxy + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: flannel-windows-cfg + mountPath: /etc/kube-flannel-windows/ + - name: kubeadm-config + mountPath: /etc/kubeadm-config/ + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumes: + - name: wins + hostPath: + path: \\.\pipe\rancher_wins + type: null + - name: opt + hostPath: + path: /opt + - name: host + hostPath: + path: / + - name: cni + hostPath: + path: /etc + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: flannel-windows-cfg + configMap: + name: kube-flannel-windows-cfg + - name: kube-proxy + configMap: + name: kube-proxy + - name: kubeadm-config + configMap: + name: kubeadm-config + + proxy: | + apiVersion: v1 + data: + run-script.ps1: |- + $ErrorActionPreference = "Stop"; + mkdir -force /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount + mkdir -force /host/k/kube-proxy + + $$CI_VERSION="${CI_VERSION:-}" + if($$CI_VERSION -ne "" -And (Test-Path -Path "/host/k/kube-proxy.exe")) + { + cp -force /host/k/kube-proxy.exe /k/kube-proxy/kube-proxy.exe + } + + cp -force /k/kube-proxy/* /host/k/kube-proxy + cp -force /var/lib/kube-proxy/* /host/var/lib/kube-proxy + cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount #FIXME? + + $networkName = (Get-Content /host/etc/cni/net.d/* | ConvertFrom-Json).name + $sourceVip = ($env:POD_IP -split "\.")[0..2] + 0 -join "." 
+ yq w -i /host/var/lib/kube-proxy/config.conf winkernel.sourceVip $sourceVip + yq w -i /host/var/lib/kube-proxy/config.conf winkernel.networkName $networkName + yq w -i /host/var/lib/kube-proxy/config.conf featureGates.WinOverlay true + yq w -i /host/var/lib/kube-proxy/config.conf featureGates.IPv6DualStack false + yq w -i /host/var/lib/kube-proxy/config.conf mode "kernelspace" + wins cli process run --path /k/kube-proxy/kube-proxy.exe --args "--v=6 --config=/var/lib/kube-proxy/config.conf --hostname-override=$env:NODE_NAME --feature-gates=WinOverlay=true" + kind: ConfigMap + metadata: + labels: + app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: kube-proxy-windows + template: + metadata: + labels: + k8s-app: kube-proxy-windows + spec: + serviceAccountName: kube-proxy + containers: + - command: + - pwsh + args: + - -file + - /var/lib/kube-proxy-windows/run-script.ps1 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-nanoserver + name: kube-proxy + volumeMounts: + - name: wins + mountPath: \\.\pipe\rancher_wins + - name: host + mountPath: /host + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/kube-proxy-windows + name: kube-proxy-windows + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - name: wins + hostPath: + path: \\.\pipe\rancher_wins + type: null + - configMap: + defaultMode: 420 + name: kube-proxy-windows + name: kube-proxy-windows + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: / + name: host + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cni-${CLUSTER_NAME}-flannel + namespace: default diff --git a/templates/test/ci/cluster-template-prow-ci-version.yaml b/templates/test/ci/cluster-template-prow-ci-version.yaml index a1e01784da1..d960c9c7940 100644 --- a/templates/test/ci/cluster-template-prow-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version.yaml @@ -92,6 +92,13 @@ spec: overwrite: false tableType: gpt files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" - content: | #!/bin/bash @@ -157,13 +164,6 @@ spec: owner: root:root path: /tmp/kubeadm-bootstrap.sh permissions: "0744" - - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" initConfiguration: nodeRegistration: kubeletExtraArgs: @@ -181,6 +181,7 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] preKubeadmCommands: - bash -c /tmp/kubeadm-bootstrap.sh useExperimentalRetryJoin: true @@ -271,6 +272,13 @@ spec: template: spec: files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" - content: | #!/bin/bash @@ -336,13 +344,6 @@ spec: owner: root:root path: /tmp/kubeadm-bootstrap.sh 
permissions: "0744" - - contentFrom: - secret: - key: worker-node-azure.json - name: ${CLUSTER_NAME}-md-0-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" joinConfiguration: nodeRegistration: kubeletExtraArgs: diff --git a/templates/test/ci/cluster-template-prow-custom-vnet.yaml b/templates/test/ci/cluster-template-prow-custom-vnet.yaml index 86adb7418f5..c8d25a1a07b 100644 --- a/templates/test/ci/cluster-template-prow-custom-vnet.yaml +++ b/templates/test/ci/cluster-template-prow-custom-vnet.yaml @@ -126,6 +126,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -215,6 +217,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/test/ci/cluster-template-prow-external-cloud-provider.yaml b/templates/test/ci/cluster-template-prow-external-cloud-provider.yaml index 8d6ab1a567b..8bcac51dcd8 100644 --- a/templates/test/ci/cluster-template-prow-external-cloud-provider.yaml +++ b/templates/test/ci/cluster-template-prow-external-cloud-provider.yaml @@ -116,6 +116,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -203,6 +205,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/test/ci/cluster-template-prow-identity-from-env.yaml b/templates/test/ci/cluster-template-prow-identity-from-env.yaml index 2617b743c6c..f6a62bbb494 100644 --- a/templates/test/ci/cluster-template-prow-identity-from-env.yaml +++ b/templates/test/ci/cluster-template-prow-identity-from-env.yaml @@ -111,6 +111,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -200,6 +202,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: cluster.x-k8s.io/v1alpha4 kind: MachineHealthCheck diff --git a/templates/test/ci/cluster-template-prow-ipv6.yaml b/templates/test/ci/cluster-template-prow-ipv6.yaml index 3f5976117c3..0cdb88c8273 100644 --- a/templates/test/ci/cluster-template-prow-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ipv6.yaml @@ -156,6 +156,7 @@ spec: - mv /etc/resolv.conf /etc/resolv.conf.OLD && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf - systemctl restart systemd-resolved + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml b/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml index 773bbe2feaa..6b734ac72bb 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool-ci-version.yaml @@ -92,6 +92,13 @@ spec: overwrite: false tableType: gpt files: + - contentFrom: + secret: + key: 
control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" - content: | #!/bin/bash @@ -157,13 +164,6 @@ spec: owner: root:root path: /tmp/kubeadm-bootstrap.sh permissions: "0744" - - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" initConfiguration: nodeRegistration: kubeletExtraArgs: @@ -181,6 +181,7 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] preKubeadmCommands: - bash -c /tmp/kubeadm-bootstrap.sh useExperimentalRetryJoin: true diff --git a/templates/test/ci/cluster-template-prow-machine-pool-windows.yaml b/templates/test/ci/cluster-template-prow-machine-pool-windows.yaml index 412c64b5949..7220a8faf81 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool-windows.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool-windows.yaml @@ -132,6 +132,7 @@ spec: - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml - netplan apply + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -401,7 +402,7 @@ data: rule: 'RunAsAny' --- kind: ClusterRole - apiVersion: rbac.authorization.k8s.io/v1beta1 + apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel rules: @@ -430,7 +431,7 @@ data: - patch --- kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1beta1 + apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel roleRef: @@ -1127,6 +1128,12 @@ data: mkdir -force /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount mkdir -force /host/k/kube-proxy + $$CI_VERSION="${CI_VERSION:-}" + if($$CI_VERSION -ne "" -And (Test-Path -Path "/host/k/kube-proxy.exe")) + { + cp -force /host/k/kube-proxy.exe /k/kube-proxy/kube-proxy.exe + } + cp -force /k/kube-proxy/* /host/k/kube-proxy cp -force /var/lib/kube-proxy/* /host/var/lib/kube-proxy cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount #FIXME? 
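The hunk below swaps the kube-proxy image tag to ${KUBERNETES_VERSION/+/_} because OCI image tags may contain letters, digits, '_', '.' and '-', but not the '+' that separates semver build metadata; a minimal bash sketch with a made-up CI version illustrates the substitution (the template appears to rely on the same shell-style replacement when it is rendered):

    # hypothetical CI build version; real values come from the CI job
    KUBERNETES_VERSION="v1.22.0-alpha.3.31+a3969e5b58bc91"
    echo "sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-nanoserver"
    # prints: sigwindowstools/kube-proxy:v1.22.0-alpha.3.31_a3969e5b58bc91-nanoserver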
@@ -1179,7 +1186,7 @@ data: valueFrom: fieldRef: fieldPath: status.podIP - image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION}-nanoserver + image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-nanoserver name: kube-proxy volumeMounts: - name: wins diff --git a/templates/test/ci/cluster-template-prow-machine-pool.yaml b/templates/test/ci/cluster-template-prow-machine-pool.yaml index 2ab33409c35..18ebab3acf8 100644 --- a/templates/test/ci/cluster-template-prow-machine-pool.yaml +++ b/templates/test/ci/cluster-template-prow-machine-pool.yaml @@ -115,6 +115,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml index 7acc9efca4a..cdf095cc3d9 100644 --- a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml +++ b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml @@ -116,6 +116,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 diff --git a/templates/test/ci/cluster-template-prow-private.yaml b/templates/test/ci/cluster-template-prow-private.yaml index 50d835dc1ae..2ccfa794b9e 100644 --- a/templates/test/ci/cluster-template-prow-private.yaml +++ b/templates/test/ci/cluster-template-prow-private.yaml @@ -236,6 +236,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureClusterIdentity diff --git a/templates/test/ci/cluster-template-prow-windows.yaml b/templates/test/ci/cluster-template-prow-windows.yaml index 06e7a2f6b93..e4f280e7a8b 100644 --- a/templates/test/ci/cluster-template-prow-windows.yaml +++ b/templates/test/ci/cluster-template-prow-windows.yaml @@ -132,6 +132,7 @@ spec: - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml - netplan apply + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -165,7 +166,7 @@ metadata: namespace: default spec: clusterName: ${CLUSTER_NAME} - replicas: ${WORKER_MACHINE_COUNT} + replicas: ${LINUX_WORKER_MACHINE_COUNT:-1} selector: matchLabels: null template: @@ -237,6 +238,7 @@ spec: - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml - netplan apply + preKubeadmCommands: [] useExperimentalRetryJoin: true --- apiVersion: cluster.x-k8s.io/v1alpha4 @@ -409,7 +411,7 @@ data: rule: 'RunAsAny' --- kind: ClusterRole - apiVersion: rbac.authorization.k8s.io/v1beta1 + apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel rules: @@ -438,7 +440,7 @@ data: - patch --- kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1beta1 + apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel roleRef: @@ -1135,6 +1137,12 @@ data: mkdir -force /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount mkdir -force /host/k/kube-proxy + $$CI_VERSION="${CI_VERSION:-}" + if($$CI_VERSION -ne "" -And (Test-Path -Path "/host/k/kube-proxy.exe")) + { + cp -force /host/k/kube-proxy.exe /k/kube-proxy/kube-proxy.exe + } + cp -force /k/kube-proxy/* 
/host/k/kube-proxy cp -force /var/lib/kube-proxy/* /host/var/lib/kube-proxy cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount #FIXME? @@ -1187,7 +1195,7 @@ data: valueFrom: fieldRef: fieldPath: status.podIP - image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION}-nanoserver + image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-nanoserver name: kube-proxy volumeMounts: - name: wins diff --git a/templates/test/ci/cluster-template-prow.yaml b/templates/test/ci/cluster-template-prow.yaml index 5f01538ebb4..d367f4d7b59 100644 --- a/templates/test/ci/cluster-template-prow.yaml +++ b/templates/test/ci/cluster-template-prow.yaml @@ -115,6 +115,8 @@ spec: mounts: - - LABEL=etcd_disk - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 @@ -204,6 +206,7 @@ spec: cloud-config: /etc/kubernetes/azure.json cloud-provider: azure name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] --- apiVersion: cluster.x-k8s.io/v1alpha4 kind: MachineHealthCheck diff --git a/templates/test/ci/patches/control-plane-ci-version.yaml b/templates/test/ci/patches/control-plane-ci-version.yaml deleted file mode 100644 index 2376dc4066d..00000000000 --- a/templates/test/ci/patches/control-plane-ci-version.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 -kind: KubeadmControlPlane -metadata: - name: "${CLUSTER_NAME}-control-plane" -spec: - kubeadmConfigSpec: - useExperimentalRetryJoin: true - clusterConfiguration: - kubernetesVersion: "ci/${CI_VERSION}" - preKubeadmCommands: - - bash -c /tmp/kubeadm-bootstrap.sh - files: - - path: /tmp/kubeadm-bootstrap.sh - owner: "root:root" - permissions: "0744" - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" - - # This test installs release packages or binaries that are a result of the CI and release builds. - # It runs '... --version' commands to verify that the binaries are correctly installed - # and finally uninstalls the packages. - # For the release packages it tests all versions in the support skew. - LINE_SEPARATOR="*************************************************" - echo "$$LINE_SEPARATOR" - CI_VERSION=${CI_VERSION} - if [[ "$${CI_VERSION}" != "" ]]; then - CI_DIR=/tmp/k8s-ci - mkdir -p $$CI_DIR - declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") - declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") - CONTAINER_EXT="tar" - echo "* testing CI version $$CI_VERSION" - # Check for semver - if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" - VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - - echo 'deb https://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list - apt-get update - # replace . with \. 
- VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" - PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" - DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION - done - else - CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" - fi - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" - wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE" - chmod +x "$$CI_DIR/$$CI_PACKAGE" - mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" - done - systemctl restart kubelet - fi - for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do - echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" - wget "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" - $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" - $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" k8s.gcr.io/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - done - fi - echo "* checking binary versions" - echo "ctr version: " $(ctr version) - echo "kubeadm version: " $(kubeadm version -o=short) - echo "kubectl version: " $(kubectl version --client=true --short=true) - echo "kubelet version: " $(kubelet --version) - echo "$$LINE_SEPARATOR" - - path: /etc/kubernetes/azure.json - owner: "root:root" - permissions: "0644" - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 -kind: AzureMachineTemplate -metadata: - name: "${CLUSTER_NAME}-control-plane" -spec: - template: - spec: - image: - # we use the 1.18.8 image as a workaround there is no published marketplace image for k8s CI versions. - # 1.18.8 binaries and images will get replaced to the desired version by the script above. - marketplace: - publisher: cncf-upstream - offer: capi - sku: k8s-1dot18dot8-ubuntu-1804 - version: "2020.08.17" diff --git a/templates/test/ci/patches/control-plane-image-ci-version.yaml b/templates/test/ci/patches/control-plane-image-ci-version.yaml new file mode 100644 index 00000000000..e6b392a37fc --- /dev/null +++ b/templates/test/ci/patches/control-plane-image-ci-version.yaml @@ -0,0 +1,15 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + image: + # we use the 1.18.8 image as a workaround there is no published marketplace image for k8s CI versions. + # 1.18.8 binaries and images will get replaced to the desired version by the script above. 
+ marketplace: + publisher: cncf-upstream + offer: capi + sku: k8s-1dot18dot8-ubuntu-1804 + version: "2020.08.17" diff --git a/templates/test/ci/patches/control-plane-kubeadm-boostrap-ci-version.yaml b/templates/test/ci/patches/control-plane-kubeadm-boostrap-ci-version.yaml new file mode 100644 index 00000000000..c0a70946a6a --- /dev/null +++ b/templates/test/ci/patches/control-plane-kubeadm-boostrap-ci-version.yaml @@ -0,0 +1,79 @@ +- op: add + path: /spec/kubeadmConfigSpec/files/- + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. + LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + CI_VERSION=${CI_VERSION} + if [[ "$${CI_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p $$CI_DIR + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing CI version $$CI_VERSION" + # Check for semver + if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" + DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + echo 'deb https://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list + apt-get update + # replace . with \. 
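+        # (for a hypothetical CI_VERSION of v1.21.1 this yields the regex 1\.21\.1, so the
+        #  apt-cache madison lookup below only matches that exact upstream version when it
+        #  picks the Debian package revision to install)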
+ VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" + PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" + DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION + done + else + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" + wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE" + chmod +x "$$CI_DIR/$$CI_PACKAGE" + mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" + done + systemctl restart kubelet + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" + wget "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" + $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" k8s.gcr.io/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true --short=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + path: /tmp/kubeadm-bootstrap.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/kubeadmConfigSpec/preKubeadmCommands/- + value: + bash -c /tmp/kubeadm-bootstrap.sh +- op: add + path: /spec/kubeadmConfigSpec/clusterConfiguration/kubernetesVersion + value: + "ci/${CI_VERSION}" +- op: add + path: /spec/kubeadmConfigSpec/useExperimentalRetryJoin + value: true diff --git a/templates/test/ci/prow-ci-version-windows/kustomization.yaml b/templates/test/ci/prow-ci-version-windows/kustomization.yaml new file mode 100644 index 00000000000..131831af143 --- /dev/null +++ b/templates/test/ci/prow-ci-version-windows/kustomization.yaml @@ -0,0 +1,32 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ../prow-windows +patchesStrategicMerge: + - patches/windows-image-update.yaml + - ../patches/control-plane-image-ci-version.yaml + - ../patches/controller-manager.yaml + - ../prow-ci-version/patches/machine-deployment-ci-version.yaml +patches: +- target: + group: bootstrap.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmConfigTemplate + name: .*-md-win + namespace: default + path: patches/machine-deployment-ci-version.yaml +- target: + group: bootstrap.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmConfigTemplate + name: .*-md-0 + namespace: default + path: ../prow-ci-version/patches/kubeadm-bootstrap.yaml +- target: + group: controlplane.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmControlPlane + name: .*-control-plane + namespace: default + path: ../patches/control-plane-kubeadm-boostrap-ci-version.yaml diff --git a/templates/test/ci/prow-ci-version-windows/patches/machine-deployment-ci-version.yaml b/templates/test/ci/prow-ci-version-windows/patches/machine-deployment-ci-version.yaml new file mode 100644 index 00000000000..961cbd27ff1 --- 
/dev/null +++ b/templates/test/ci/prow-ci-version-windows/patches/machine-deployment-ci-version.yaml @@ -0,0 +1,32 @@ +- op: add + path: /spec/template/spec/files/- + value: + content: | + Stop-Service kubelet -Force + + $$CI_VERSION="${CI_VERSION}" + if($$CI_VERSION -ne "") + { + $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") + $$ci_url="https://storage.googleapis.com/k8s-release-dev/ci/$$CI_VERSION/bin/windows/amd64" + foreach ( $$binary in $$binaries ) + { + echo "downloading binary: $$ci_url/$$binary.exe" + curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe" + } + } + + # We are using a VHD that maps to v1.18.19 so the kubeproxy image is already pulled. (pull it just in case) + # Tag it to the ci version. The image knows how to use the copy locally. + docker pull sigwindowstools/kube-proxy:v1.18.19-nanoserver + docker tag sigwindowstools/kube-proxy:v1.18.19-nanoserver "sigwindowstools/kube-proxy:${CI_VERSION/+/_}-nanoserver" + + kubeadm.exe version -o=short + kubectl.exe version --client=true --short=true + kubelet.exe --version + path: C:/replace-k8s-binaries.ps1 + permissions: "0744" +- op: add + path: /spec/template/spec/preKubeadmCommands/- + value: + powershell C:/replace-k8s-binaries.ps1 diff --git a/templates/test/ci/prow-ci-version-windows/patches/windows-image-update.yaml b/templates/test/ci/prow-ci-version-windows/patches/windows-image-update.yaml new file mode 100644 index 00000000000..a2af9e6eec7 --- /dev/null +++ b/templates/test/ci/prow-ci-version-windows/patches/windows-image-update.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-win" +spec: + template: + spec: + image: + # we use the 1.18.19 image as a workaround there is no published marketplace image for k8s CI versions. + # 1.18.19 binaries and images will get replaced to the desired version by the script above. 
+ marketplace: + publisher: cncf-upstream + offer: capi-windows + sku: k8s-1dot18dot19-windows-2019 + version: "2021.05.17" diff --git a/templates/test/ci/prow-ci-version/kustomization.yaml b/templates/test/ci/prow-ci-version/kustomization.yaml index a06c72e70be..d32f985a384 100644 --- a/templates/test/ci/prow-ci-version/kustomization.yaml +++ b/templates/test/ci/prow-ci-version/kustomization.yaml @@ -4,15 +4,20 @@ namespace: default resources: - ../prow patchesStrategicMerge: - - ../patches/control-plane-ci-version.yaml - - ../patches/controller-manager.yaml - - patches/machine-deployment-ci-version.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: default -resources: - - ../prow -patchesStrategicMerge: - - ../patches/control-plane-ci-version.yaml + - ../patches/control-plane-image-ci-version.yaml - ../patches/controller-manager.yaml - patches/machine-deployment-ci-version.yaml +patches: +- target: + group: bootstrap.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmConfigTemplate + name: .*-md-0 + namespace: default + path: patches/kubeadm-bootstrap.yaml +- target: + group: controlplane.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmControlPlane + name: .*-control-plane + path: ../patches/control-plane-kubeadm-boostrap-ci-version.yaml diff --git a/templates/test/ci/prow-ci-version/patches/kubeadm-bootstrap.yaml b/templates/test/ci/prow-ci-version/patches/kubeadm-bootstrap.yaml new file mode 100644 index 00000000000..03c55e9301d --- /dev/null +++ b/templates/test/ci/prow-ci-version/patches/kubeadm-bootstrap.yaml @@ -0,0 +1,72 @@ +- op: add + path: /spec/template/spec/files/- + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. + LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + CI_VERSION=${CI_VERSION} + if [[ "$${CI_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p $$CI_DIR + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing CI version $$CI_VERSION" + # Check for semver + if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" + DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - + echo 'deb https://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list + apt-get update + # replace . with \. 
+ VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" + PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" + DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION + done + else + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" + wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE" + chmod +x "$$CI_DIR/$$CI_PACKAGE" + mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" + done + systemctl restart kubelet + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" + wget "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" + $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" k8s.gcr.io/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true --short=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + path: /tmp/kubeadm-bootstrap.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/template/spec/preKubeadmCommands/- + value: + bash -c /tmp/kubeadm-bootstrap.sh \ No newline at end of file diff --git a/templates/test/ci/prow-ci-version/patches/machine-deployment-ci-version.yaml b/templates/test/ci/prow-ci-version/patches/machine-deployment-ci-version.yaml index 7439bc441f7..ef406774adf 100644 --- a/templates/test/ci/prow-ci-version/patches/machine-deployment-ci-version.yaml +++ b/templates/test/ci/prow-ci-version/patches/machine-deployment-ci-version.yaml @@ -1,86 +1,3 @@ -apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 -kind: KubeadmConfigTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 -spec: - template: - spec: - preKubeadmCommands: - - bash -c /tmp/kubeadm-bootstrap.sh - files: - - path: /tmp/kubeadm-bootstrap.sh - owner: "root:root" - permissions: "0744" - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" - - # This test installs release packages or binaries that are a result of the CI and release builds. - # It runs '... --version' commands to verify that the binaries are correctly installed - # and finally uninstalls the packages. - # For the release packages it tests all versions in the support skew. 
- LINE_SEPARATOR="*************************************************" - echo "$$LINE_SEPARATOR" - CI_VERSION=${CI_VERSION} - if [[ "$${CI_VERSION}" != "" ]]; then - CI_DIR=/tmp/k8s-ci - mkdir -p $$CI_DIR - declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") - declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") - CONTAINER_EXT="tar" - echo "* testing CI version $$CI_VERSION" - # Check for semver - if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" - VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" - DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl - curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - - echo 'deb https://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list - apt-get update - # replace . with \. - VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" - PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" - DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION - done - else - CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" - fi - for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do - echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" - wget "$$CI_URL/$$CI_PACKAGE" -O "$$CI_DIR/$$CI_PACKAGE" - chmod +x "$$CI_DIR/$$CI_PACKAGE" - mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" - done - systemctl restart kubelet - fi - for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do - echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" - wget "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" - $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" - $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" k8s.gcr.io/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - $${SUDO} ctr -n k8s.io images tag k8s.gcr.io/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" - done - fi - echo "* checking binary versions" - echo "ctr version: " $(ctr version) - echo "kubeadm version: " $(kubeadm version -o=short) - echo "kubectl version: " $(kubectl version --client=true --short=true) - echo "kubelet version: " $(kubelet --version) - echo "$$LINE_SEPARATOR" - - path: /etc/kubernetes/azure.json - owner: "root:root" - permissions: "0644" - contentFrom: - secret: - key: worker-node-azure.json - name: ${CLUSTER_NAME}-md-0-azure-json ---- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 kind: AzureMachineTemplate metadata: diff --git a/templates/test/ci/prow-machine-pool-ci-version/kustomization.yaml b/templates/test/ci/prow-machine-pool-ci-version/kustomization.yaml index b2748941001..bbfc46d0732 100644 --- a/templates/test/ci/prow-machine-pool-ci-version/kustomization.yaml +++ b/templates/test/ci/prow-machine-pool-ci-version/kustomization.yaml @@ -4,6 +4,14 @@ namespace: default resources: - ../prow-machine-pool patchesStrategicMerge: - - ../patches/control-plane-ci-version.yaml + - ../patches/control-plane-image-ci-version.yaml - ../patches/controller-manager.yaml - patches/machine-pool-ci-version.yaml +patches: +- target: + 
group: controlplane.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmControlPlane + name: .*-control-plane + namespace: default + path: ../patches/control-plane-kubeadm-boostrap-ci-version.yaml diff --git a/templates/test/dev/cluster-template-custom-builds-windows.yaml b/templates/test/dev/cluster-template-custom-builds-windows.yaml new file mode 100644 index 00000000000..c043a81c240 --- /dev/null +++ b/templates/test/dev/cluster-template-custom-builds-windows.yaml @@ -0,0 +1,1362 @@ +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: Cluster +metadata: + labels: + cni: ${CLUSTER_NAME}-flannel + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.244.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + vnet: + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 +kind: KubeadmControlPlane +metadata: + annotations: + controlplane.cluster.x-k8s.io/skip-kube-proxy: "true" + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + extraVolumes: + - hostPath: /etc/kubernetes/azure.json + mountPath: /etc/kubernetes/azure.json + name: cloud-config + readOnly: true + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "true" + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + cluster-name: ${CLUSTER_NAME} + configure-cloud-routes: "false" + v: "4" + extraVolumes: + - hostPath: /etc/kubernetes/azure.json + mountPath: /etc/kubernetes/azure.json + name: cloud-config + readOnly: true + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + kubernetesVersion: ci/${CI_VERSION} + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - content: | + network: + version: 2 + ethernets: + eth0: + mtu: 1400 + match: + macaddress: MACADDRESS + set-name: eth0 + owner: root:root + path: /etc/netplan/60-eth0.yaml + permissions: "0644" + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + for BINARY in "$${BINARIES[@]}"; do + echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" + curl --retry 10 
--retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + done + systemctl restart kubelet + + # prepull images from gcr.io/k8s-staging-ci-images and retag it to + # k8s.gcr.io so kubeadm can fetch correct images no matter what + declare -a IMAGES=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + for IMAGE in "$${IMAGES[@]}"; do + $${SUDO} ctr -n k8s.io images pull "gcr.io/k8s-staging-ci-images/$${IMAGE}:${CI_VERSION/+/_}" + $${SUDO} ctr -n k8s.io images tag "gcr.io/k8s-staging-ci-images/$${IMAGE}:${CI_VERSION/+/_}" "k8s.gcr.io/$${IMAGE}:${CI_VERSION/+/_}" + done + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true --short=true)" + echo "kubelet version: $(kubelet --version)" + owner: root:root + path: /tmp/replace-k8s-binaries.sh + permissions: "0744" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + curl -L --retry 10 --retry-delay 5 https://github.com/mikefarah/yq/releases/download/v4.6.1/yq_linux_amd64.tar.gz --output /tmp/yq_linux_amd64.tar.gz + tar -xzvf /tmp/yq_linux_amd64.tar.gz -C /tmp && mv /tmp/yq_linux_amd64 /usr/bin/yq + rm /tmp/yq_linux_amd64.tar.gz + + export KUBECONFIG=/etc/kubernetes/admin.conf + kubectl -n kube-system set image daemonset/kube-proxy kube-proxy="${REGISTRY}/kube-proxy:${IMAGE_TAG}" + yq e '.spec.containers[0].image = "${REGISTRY}/kube-apiserver:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-apiserver.yaml + yq e '.spec.containers[0].image = "${REGISTRY}/kube-controller-manager:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-controller-manager.yaml + yq e '.spec.containers[0].image = "${REGISTRY}/kube-scheduler:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-scheduler.yaml + owner: root:root + path: /tmp/replace-k8s-components.sh + permissions: "0744" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: + - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') + - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml + - netplan apply + - bash -c /tmp/replace-k8s-components.sh + preKubeadmCommands: + - bash -c /tmp/replace-k8s-binaries.sh + useExperimentalRetryJoin: true + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + image: + marketplace: + offer: capi + publisher: cncf-upstream + sku: k8s-1dot18dot8-ubuntu-1804 + version: 2020.08.17 + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: 
${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${LINUX_WORKER_MACHINE_COUNT:-1} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + image: + marketplace: + offer: capi + publisher: cncf-upstream + sku: k8s-1dot18dot8-ubuntu-1804 + version: 2020.08.17 + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + network: + version: 2 + ethernets: + eth0: + mtu: 1400 + match: + macaddress: MACADDRESS + set-name: eth0 + owner: root:root + path: /etc/netplan/60-eth0.yaml + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + for BINARY in "$${BINARIES[@]}"; do + echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + done + systemctl restart kubelet + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true --short=true)" + echo "kubelet version: $(kubelet --version)" + owner: root:root + path: /tmp/replace-k8s-binaries.sh + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-config: /etc/kubernetes/azure.json + cloud-provider: azure + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - mac=$(ip -o link | grep eth0 | grep ether | awk '{ print $17 }') + - sed -i -e "s/MACADDRESS/$${mac}/g" /etc/netplan/60-eth0.yaml + - netplan apply + preKubeadmCommands: + - bash -c /tmp/replace-k8s-binaries.sh + useExperimentalRetryJoin: true +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-win + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-win + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 
+kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + spec: + image: + marketplace: + offer: capi-windows + publisher: cncf-upstream + sku: k8s-1dot18dot19-windows-2019 + version: 2021.05.17 + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Windows + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-win-azure-json + owner: root:root + path: c:/k/azure.json + permissions: "0644" + - content: | + # required as a work around for Flannel and Wins bugs + # https://github.com/coreos/flannel/issues/1359 + # https://github.com/kubernetes-sigs/sig-windows-tools/issues/103#issuecomment-709426828 + ipmo C:\k\debug\hns.psm1; + New-HnsNetwork -Type Overlay -AddressPrefix "192.168.255.0/30" -Gateway "192.168.255.1" -Name "External" -AdapterName "Ethernet 2" -SubnetPolicies @(@{Type = "VSID"; VSID = 9999; }) + path: C:/create-external-network.ps1 + permissions: "0744" + - content: | + # /tmp is assumed created and required for upstream e2e tests to pass + New-Item -ItemType Directory -Force -Path C:\tmp\ + path: C:/create-temp-folder.ps1 + permissions: "0744" + - content: | + Stop-Service kubelet -Force + + $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + foreach ( $$binary in $$binaries ) + { + echo "installing package: $$binary ${KUBE_GIT_VERSION}" + curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe" + } + + # We are using a VHD that maps to v1.18.19 so the kubeproxy image is already pulled. (pull it just in case) + # Tag it to the ci_version which is the version set when kicking off the CI builds and doesn't match the KUBE_GIT_VERSION + # but matches the kubeproxy image tag when it gets generated. The image configuraiton knows how to use the binary locally. + # This does mean the image tage will not match the verison of the binary running. 
+ # See: + # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/templates/addons/windows/kube-proxy-windows.yaml#L60 + # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/scripts/ci-build-kubernetes.sh#L54-L59 + docker pull sigwindowstools/kube-proxy:v1.18.19-nanoserver + docker tag sigwindowstools/kube-proxy:v1.18.19-nanoserver "sigwindowstools/kube-proxy:${CI_VERSION/+/_}-nanoserver" + + kubeadm.exe version -o=short + kubectl.exe version --client=true --short=true + kubelet.exe --version + kube-proxy.exe --version + path: C:/replace-k8s-binaries.ps1 + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: c:/k/azure.json + cloud-config: c:/k/azure.json + cloud-provider: azure + pod-infra-container-image: mcr.microsoft.com/oss/kubernetes/pause:1.4.1 + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - nssm set kubelet start SERVICE_AUTO_START + preKubeadmCommands: + - powershell c:/create-external-network.ps1 + - powershell C:/create-temp-folder.ps1 + - powershell C:/replace-k8s-binaries.ps1 + users: + - groups: Administrators + name: capi + sshAuthorizedKeys: + - ${AZURE_SSH_PUBLIC_KEY:=""} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID} + clientSecret: + name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} + namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} + tenantID: ${AZURE_TENANT_ID} + type: ServicePrincipal +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha4 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-flannel + namespace: default +spec: + clusterSelector: + matchLabels: + cni: ${CLUSTER_NAME}-flannel + resources: + - kind: ConfigMap + name: cni-${CLUSTER_NAME}-flannel + strategy: ApplyOnce +--- +apiVersion: v1 +data: + cni: |+ + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' + --- + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + rules: + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] + - apiGroups: + - "" + resources: + - 
pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: flannel + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel + subjects: + - kind: ServiceAccount + name: flannel + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: flannel + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: kube-flannel-cfg + namespace: kube-system + labels: + tier: node + app: flannel + data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan", + "VNI" : 4096, + "Port": 4789 + } + } + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-amd64 + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-amd64 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-amd64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-arm64 + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm64 + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-arm64 + command: + - cp + args: + - -f + - 
/etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-arm64 + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-arm + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - arm + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-arm + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-arm + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-ppc64le + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - ppc64le + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-ppc64le + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: 
flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-ppc64le + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-s390x + namespace: kube-system + labels: + tier: node + app: flannel + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/os + operator: In + values: + - linux + - key: beta.kubernetes.io/arch + operator: In + values: + - s390x + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.12.0-s390x + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + containers: + - name: kube-flannel + image: quay.io/coreos/flannel:v0.12.0-s390x + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN"] + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run/flannel + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + volumes: + - name: run + hostPath: + path: /run/flannel + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: kube-flannel-windows-cfg + namespace: kube-system + labels: + tier: node + app: flannel + data: + run.ps1: | + $ErrorActionPreference = "Stop"; + + mkdir -force /host/etc/cni/net.d + mkdir -force /host/etc/kube-flannel + mkdir -force /host/opt/cni/bin + mkdir -force /host/k/flannel + mkdir -force /host/k/flannel/var/run/secrets/kubernetes.io/serviceaccount + + $cniJson = get-content /etc/kube-flannel-windows/cni-conf.json | ConvertFrom-Json + $serviceSubnet = yq r /etc/kubeadm-config/ClusterConfiguration networking.serviceSubnet + $podSubnet = yq r /etc/kubeadm-config/ClusterConfiguration networking.podSubnet + $networkJson = wins cli net get | convertfrom-json + + $cniJson.delegate.policies[0].Value.ExceptionList = $serviceSubnet, $podSubnet + $cniJson.delegate.policies[1].Value.DestinationPrefix = $serviceSubnet + Set-Content -Path /host/etc/cni/net.d/10-flannel.conf ($cniJson | ConvertTo-Json -depth 100) + + cp -force /etc/kube-flannel/net-conf.json 
/host/etc/kube-flannel + cp -force -recurse /cni/* /host/opt/cni/bin + cp -force /k/flannel/* /host/k/flannel/ + cp -force /kube-proxy/kubeconfig.conf /host/k/flannel/kubeconfig.yml + cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/k/flannel/var/run/secrets/kubernetes.io/serviceaccount/ + wins cli process run --path /k/flannel/setup.exe --args "--mode=overlay --interface=Ethernet 2" + wins cli route add --addresses 169.254.169.254 + wins cli process run --path /k/flannel/flanneld.exe --args "--kube-subnet-mgr --kubeconfig-file /k/flannel/kubeconfig.yml" --envs "POD_NAME=$env:POD_NAME POD_NAMESPACE=$env:POD_NAMESPACE" + cni-conf.json: | + { + "name": "flannel.4096", + "cniVersion": "0.3.0", + "type": "flannel", + "capabilities": { + "dns": true + }, + "delegate": { + "type": "win-overlay", + "policies": [ + { + "Name": "EndpointPolicy", + "Value": { + "Type": "OutBoundNAT", + "ExceptionList": [] + } + }, + { + "Name": "EndpointPolicy", + "Value": { + "Type": "ROUTE", + "DestinationPrefix": "", + "NeedEncap": true + } + } + ] + } + } + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kube-flannel-ds-windows-amd64 + labels: + tier: node + app: flannel + namespace: kube-system + spec: + selector: + matchLabels: + app: flannel + template: + metadata: + labels: + tier: node + app: flannel + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - windows + - key: kubernetes.io/arch + operator: In + values: + - amd64 + hostNetwork: true + serviceAccountName: flannel + tolerations: + - operator: Exists + effect: NoSchedule + containers: + - name: kube-flannel + image: sigwindowstools/flannel:v0.13.0-nanoserver + command: + - pwsh + args: + - -file + - /etc/kube-flannel-windows/run.ps1 + volumeMounts: + - name: wins + mountPath: \\.\pipe\rancher_wins + - name: host + mountPath: /host + - name: kube-proxy + mountPath: /kube-proxy + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: flannel-windows-cfg + mountPath: /etc/kube-flannel-windows/ + - name: kubeadm-config + mountPath: /etc/kubeadm-config/ + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumes: + - name: wins + hostPath: + path: \\.\pipe\rancher_wins + type: null + - name: opt + hostPath: + path: /opt + - name: host + hostPath: + path: / + - name: cni + hostPath: + path: /etc + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: flannel-windows-cfg + configMap: + name: kube-flannel-windows-cfg + - name: kube-proxy + configMap: + name: kube-proxy + - name: kubeadm-config + configMap: + name: kubeadm-config + + proxy: | + apiVersion: v1 + data: + run-script.ps1: |- + $ErrorActionPreference = "Stop"; + mkdir -force /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount + mkdir -force /host/k/kube-proxy + + $$CI_VERSION="${CI_VERSION:-}" + if($$CI_VERSION -ne "" -And (Test-Path -Path "/host/k/kube-proxy.exe")) + { + cp -force /host/k/kube-proxy.exe /k/kube-proxy/kube-proxy.exe + } + + cp -force /k/kube-proxy/* /host/k/kube-proxy + cp -force /var/lib/kube-proxy/* /host/var/lib/kube-proxy + cp -force /var/run/secrets/kubernetes.io/serviceaccount/* /host/var/lib/kube-proxy/var/run/secrets/kubernetes.io/serviceaccount #FIXME? 
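+          # The HNS network name is read from the CNI conf that flannel wrote, and the source VIP
+          # is derived from the pod IP (first three octets with a .0 host part); both are patched
+          # into the kube-proxy config with yq before kube-proxy starts in kernelspace mode via wins.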
+ + $networkName = (Get-Content /host/etc/cni/net.d/* | ConvertFrom-Json).name + $sourceVip = ($env:POD_IP -split "\.")[0..2] + 0 -join "." + yq w -i /host/var/lib/kube-proxy/config.conf winkernel.sourceVip $sourceVip + yq w -i /host/var/lib/kube-proxy/config.conf winkernel.networkName $networkName + yq w -i /host/var/lib/kube-proxy/config.conf featureGates.WinOverlay true + yq w -i /host/var/lib/kube-proxy/config.conf featureGates.IPv6DualStack false + yq w -i /host/var/lib/kube-proxy/config.conf mode "kernelspace" + wins cli process run --path /k/kube-proxy/kube-proxy.exe --args "--v=6 --config=/var/lib/kube-proxy/config.conf --hostname-override=$env:NODE_NAME --feature-gates=WinOverlay=true" + kind: ConfigMap + metadata: + labels: + app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: kube-proxy-windows + template: + metadata: + labels: + k8s-app: kube-proxy-windows + spec: + serviceAccountName: kube-proxy + containers: + - command: + - pwsh + args: + - -file + - /var/lib/kube-proxy-windows/run-script.ps1 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-nanoserver + name: kube-proxy + volumeMounts: + - name: wins + mountPath: \\.\pipe\rancher_wins + - name: host + mountPath: /host + - mountPath: /var/lib/kube-proxy + name: kube-proxy + - mountPath: /var/lib/kube-proxy-windows + name: kube-proxy-windows + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - name: wins + hostPath: + path: \\.\pipe\rancher_wins + type: null + - configMap: + defaultMode: 420 + name: kube-proxy-windows + name: kube-proxy-windows + - configMap: + name: kube-proxy + name: kube-proxy + - hostPath: + path: / + name: host + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cni-${CLUSTER_NAME}-flannel + namespace: default diff --git a/templates/test/dev/cluster-template-custom-builds.yaml b/templates/test/dev/cluster-template-custom-builds.yaml index f1cb4ec6f87..65e15703bfa 100644 --- a/templates/test/dev/cluster-template-custom-builds.yaml +++ b/templates/test/dev/cluster-template-custom-builds.yaml @@ -94,6 +94,13 @@ spec: overwrite: false tableType: gpt files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" - content: | #!/bin/bash @@ -143,13 +150,6 @@ spec: owner: root:root path: /tmp/replace-k8s-components.sh permissions: "0744" - - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" initConfiguration: nodeRegistration: kubeletExtraArgs: @@ -259,6 +259,13 @@ spec: template: spec: files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" - content: | #!/bin/bash @@ -280,13 +287,6 @@ spec: owner: root:root path: /tmp/replace-k8s-binaries.sh permissions: "0744" - - contentFrom: - secret: - 
key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json - owner: root:root - path: /etc/kubernetes/azure.json - permissions: "0644" joinConfiguration: nodeRegistration: kubeletExtraArgs: diff --git a/templates/test/dev/custom-builds-windows/kustomization.yaml b/templates/test/dev/custom-builds-windows/kustomization.yaml new file mode 100644 index 00000000000..64c71579e49 --- /dev/null +++ b/templates/test/dev/custom-builds-windows/kustomization.yaml @@ -0,0 +1,27 @@ +namespace: default +resources: + - ../../../test/ci/prow-windows +patchesStrategicMerge: + - patches/windows-image-update.yaml + - ../custom-builds/patches/machine-deployment-pr-version.yaml +patchesJson6902: +- target: + group: bootstrap.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmConfigTemplate + name: .*-md-win + namespace: default + path: patches/custom-builds-windows.yaml +- target: + group: bootstrap.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmConfigTemplate + name: .*-md-0 + namespace: default + path: ../custom-builds/patches/kubeadm-bootstrap.yaml +- target: + group: controlplane.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmControlPlane + name: .*-control-plane + path: ../custom-builds/patches/kubeadm-controlplane-bootstrap.yaml diff --git a/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml b/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml new file mode 100644 index 00000000000..c62fe55e4e2 --- /dev/null +++ b/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml @@ -0,0 +1,34 @@ +- op: add + path: /spec/template/spec/files/- + value: + content: | + Stop-Service kubelet -Force + + $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + foreach ( $$binary in $$binaries ) + { + echo "installing package: $$binary ${KUBE_GIT_VERSION}" + curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe" + } + + # We are using a VHD that maps to v1.18.19, so the kube-proxy image is already pulled (pull it again just in case). + # Tag it to the ci_version, which is the version set when kicking off the CI builds; it doesn't match the KUBE_GIT_VERSION + # but matches the kube-proxy image tag when it gets generated. The image configuration knows how to use the binary locally. + # This does mean the image tag will not match the version of the binary running. 
+ # See: + # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/templates/addons/windows/kube-proxy-windows.yaml#L60 + # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/scripts/ci-build-kubernetes.sh#L54-L59 + docker pull sigwindowstools/kube-proxy:v1.18.19-nanoserver + docker tag sigwindowstools/kube-proxy:v1.18.19-nanoserver "sigwindowstools/kube-proxy:${CI_VERSION/+/_}-nanoserver" + + kubeadm.exe version -o=short + kubectl.exe version --client=true --short=true + kubelet.exe --version + kube-proxy.exe --version + path: C:/replace-k8s-binaries.ps1 + permissions: "0744" +- op: add + path: /spec/template/spec/preKubeadmCommands/- + value: + powershell C:/replace-k8s-binaries.ps1 diff --git a/templates/test/dev/custom-builds-windows/patches/windows-image-update.yaml b/templates/test/dev/custom-builds-windows/patches/windows-image-update.yaml new file mode 100644 index 00000000000..a2af9e6eec7 --- /dev/null +++ b/templates/test/dev/custom-builds-windows/patches/windows-image-update.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: "${CLUSTER_NAME}-md-win" +spec: + template: + spec: + image: + # we use the 1.18.19 image as a workaround because there is no published marketplace image for k8s CI versions. + # 1.18.19 binaries and images will get replaced to the desired version by the script above. + marketplace: + publisher: cncf-upstream + offer: capi-windows + sku: k8s-1dot18dot19-windows-2019 + version: "2021.05.17" diff --git a/templates/test/dev/custom-builds/kustomization.yaml b/templates/test/dev/custom-builds/kustomization.yaml index 6c7a10f7e90..a29eee3f6c5 100644 --- a/templates/test/dev/custom-builds/kustomization.yaml +++ b/templates/test/dev/custom-builds/kustomization.yaml @@ -2,5 +2,18 @@ namespace: default resources: - ../../../test/ci/prow patchesStrategicMerge: - - ../patches/control-plane-custom-builds.yaml - - patches/custom-builds.yaml + - patches/machine-deployment-pr-version.yaml +patches: +- target: + group: bootstrap.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmConfigTemplate + name: .*-md-0 + namespace: default + path: patches/kubeadm-bootstrap.yaml +- target: + group: controlplane.cluster.x-k8s.io + version: v1alpha4 + kind: KubeadmControlPlane + name: .*-control-plane + path: patches/kubeadm-controlplane-bootstrap.yaml \ No newline at end of file diff --git a/templates/test/dev/custom-builds/patches/custom-builds.yaml b/templates/test/dev/custom-builds/patches/custom-builds.yaml deleted file mode 100644 index 85b4b4833ff..00000000000 --- a/templates/test/dev/custom-builds/patches/custom-builds.yaml +++ /dev/null @@ -1,143 +0,0 @@ -apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 -kind: KubeadmControlPlane -metadata: - name: "${CLUSTER_NAME}-control-plane" - annotations: - controlplane.cluster.x-k8s.io/skip-kube-proxy: "true" -spec: - kubeadmConfigSpec: - useExperimentalRetryJoin: true - clusterConfiguration: - kubernetesVersion: "ci/${CI_VERSION}" - preKubeadmCommands: - - bash -c /tmp/replace-k8s-binaries.sh - postKubeadmCommands: - - bash -c /tmp/replace-k8s-components.sh - files: - - path: /tmp/replace-k8s-binaries.sh - owner: "root:root" - permissions: "0744" - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - - systemctl stop kubelet - declare -a BINARIES=("kubeadm" "kubectl" "kubelet") - for BINARY in "$${BINARIES[@]}"; do - echo "* 
installing package: $${BINARY} ${KUBE_GIT_VERSION}" - curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" - done - systemctl restart kubelet - - # prepull images from gcr.io/k8s-staging-ci-images and retag it to - # k8s.gcr.io so kubeadm can fetch correct images no matter what - declare -a IMAGES=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") - [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" - for IMAGE in "$${IMAGES[@]}"; do - $${SUDO} ctr -n k8s.io images pull "gcr.io/k8s-staging-ci-images/$${IMAGE}:${CI_VERSION/+/_}" - $${SUDO} ctr -n k8s.io images tag "gcr.io/k8s-staging-ci-images/$${IMAGE}:${CI_VERSION/+/_}" "k8s.gcr.io/$${IMAGE}:${CI_VERSION/+/_}" - done - - echo "kubeadm version: $(kubeadm version -o=short)" - echo "kubectl version: $(kubectl version --client=true --short=true)" - echo "kubelet version: $(kubelet --version)" - - path: /tmp/replace-k8s-components.sh - owner: "root:root" - permissions: "0744" - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - - curl -L --retry 10 --retry-delay 5 https://github.com/mikefarah/yq/releases/download/v4.6.1/yq_linux_amd64.tar.gz --output /tmp/yq_linux_amd64.tar.gz - tar -xzvf /tmp/yq_linux_amd64.tar.gz -C /tmp && mv /tmp/yq_linux_amd64 /usr/bin/yq - rm /tmp/yq_linux_amd64.tar.gz - - export KUBECONFIG=/etc/kubernetes/admin.conf - kubectl -n kube-system set image daemonset/kube-proxy kube-proxy="${REGISTRY}/kube-proxy:${IMAGE_TAG}" - yq e '.spec.containers[0].image = "${REGISTRY}/kube-apiserver:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-apiserver.yaml - yq e '.spec.containers[0].image = "${REGISTRY}/kube-controller-manager:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-controller-manager.yaml - yq e '.spec.containers[0].image = "${REGISTRY}/kube-scheduler:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-scheduler.yaml - - path: /etc/kubernetes/azure.json - owner: "root:root" - permissions: "0644" - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4 -kind: KubeadmConfigTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 -spec: - template: - spec: - preKubeadmCommands: - - bash -c /tmp/replace-k8s-binaries.sh - files: - - path: /tmp/replace-k8s-binaries.sh - owner: "root:root" - permissions: "0744" - content: | - #!/bin/bash - - set -o nounset - set -o pipefail - set -o errexit - - systemctl stop kubelet - declare -a BINARIES=("kubeadm" "kubectl" "kubelet") - for BINARY in "$${BINARIES[@]}"; do - echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" - curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" - done - systemctl restart kubelet - - echo "kubeadm version: $(kubeadm version -o=short)" - echo "kubectl version: $(kubectl version --client=true --short=true)" - echo "kubelet version: $(kubelet --version)" - - path: /etc/kubernetes/azure.json - owner: "root:root" - permissions: "0644" - contentFrom: - secret: - key: control-plane-azure.json - name: ${CLUSTER_NAME}-control-plane-azure-json ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-control-plane -spec: - template: - spec: - image: - # we use the 1.18.8 image as a workaround there is no 
published marketplace image for k8s CI versions. - # 1.18.8 binaries and images will get replaced to the desired version by the script above. - marketplace: - publisher: cncf-upstream - offer: capi - sku: k8s-1dot18dot8-ubuntu-1804 - version: "2020.08.17" ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 -kind: AzureMachineTemplate -metadata: - name: ${CLUSTER_NAME}-md-0 -spec: - template: - spec: - image: - # we use the 1.18.8 image as a workaround there is no published marketplace image for k8s CI versions. - # 1.18.8 binaries and images will get replaced to the desired version by the script above. - marketplace: - publisher: cncf-upstream - offer: capi - sku: k8s-1dot18dot8-ubuntu-1804 - version: "2020.08.17" diff --git a/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml b/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml new file mode 100644 index 00000000000..42be3a83049 --- /dev/null +++ b/templates/test/dev/custom-builds/patches/kubeadm-bootstrap.yaml @@ -0,0 +1,28 @@ +- op: add + path: /spec/template/spec/files/- + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + for BINARY in "$${BINARIES[@]}"; do + echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + done + systemctl restart kubelet + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true --short=true)" + echo "kubelet version: $(kubelet --version)" + path: /tmp/replace-k8s-binaries.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/template/spec/preKubeadmCommands/- + value: + bash -c /tmp/replace-k8s-binaries.sh diff --git a/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml b/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml new file mode 100644 index 00000000000..f9bee901cb4 --- /dev/null +++ b/templates/test/dev/custom-builds/patches/kubeadm-controlplane-bootstrap.yaml @@ -0,0 +1,63 @@ +- op: add + path: /spec/kubeadmConfigSpec/files/- + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + for BINARY in "$${BINARIES[@]}"; do + echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" + curl --retry 10 --retry-delay 5 "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${JOB_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" --output "/usr/bin/$${BINARY}" + done + systemctl restart kubelet + + # prepull images from gcr.io/k8s-staging-ci-images and retag it to + # k8s.gcr.io so kubeadm can fetch correct images no matter what + declare -a IMAGES=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + for IMAGE in "$${IMAGES[@]}"; do + $${SUDO} ctr -n k8s.io images pull "gcr.io/k8s-staging-ci-images/$${IMAGE}:${CI_VERSION/+/_}" + $${SUDO} ctr -n k8s.io images tag "gcr.io/k8s-staging-ci-images/$${IMAGE}:${CI_VERSION/+/_}" "k8s.gcr.io/$${IMAGE}:${CI_VERSION/+/_}" + done + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true --short=true)" + echo "kubelet version: $(kubelet --version)" + path: 
/tmp/replace-k8s-binaries.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/kubeadmConfigSpec/files/- + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + curl -L --retry 10 --retry-delay 5 https://github.com/mikefarah/yq/releases/download/v4.6.1/yq_linux_amd64.tar.gz --output /tmp/yq_linux_amd64.tar.gz + tar -xzvf /tmp/yq_linux_amd64.tar.gz -C /tmp && mv /tmp/yq_linux_amd64 /usr/bin/yq + rm /tmp/yq_linux_amd64.tar.gz + + export KUBECONFIG=/etc/kubernetes/admin.conf + kubectl -n kube-system set image daemonset/kube-proxy kube-proxy="${REGISTRY}/kube-proxy:${IMAGE_TAG}" + yq e '.spec.containers[0].image = "${REGISTRY}/kube-apiserver:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-apiserver.yaml + yq e '.spec.containers[0].image = "${REGISTRY}/kube-controller-manager:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-controller-manager.yaml + yq e '.spec.containers[0].image = "${REGISTRY}/kube-scheduler:${IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-scheduler.yaml + path: /tmp/replace-k8s-components.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/kubeadmConfigSpec/preKubeadmCommands/- + value: + bash -c /tmp/replace-k8s-binaries.sh +- op: add + path: /spec/kubeadmConfigSpec/postKubeadmCommands/- + value: + bash -c /tmp/replace-k8s-components.sh \ No newline at end of file diff --git a/templates/test/dev/custom-builds/patches/machine-deployment-pr-version.yaml b/templates/test/dev/custom-builds/patches/machine-deployment-pr-version.yaml new file mode 100644 index 00000000000..18afeadae24 --- /dev/null +++ b/templates/test/dev/custom-builds/patches/machine-deployment-pr-version.yaml @@ -0,0 +1,44 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + template: + spec: + image: + # we use the 1.18.8 image as a workaround because there is no published marketplace image for k8s CI versions. + # 1.18.8 binaries and images will get replaced to the desired version by the script above. + marketplace: + publisher: cncf-upstream + offer: capi + sku: k8s-1dot18dot8-ubuntu-1804 + version: "2020.08.17" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 +spec: + template: + spec: + image: + # we use the 1.18.8 image as a workaround because there is no published marketplace image for k8s CI versions. + # 1.18.8 binaries and images will get replaced to the desired version by the script above. 
+ marketplace: + publisher: cncf-upstream + offer: capi + sku: k8s-1dot18dot8-ubuntu-1804 + version: "2020.08.17" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 +kind: KubeadmControlPlane +metadata: + annotations: + controlplane.cluster.x-k8s.io/skip-kube-proxy: "true" + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + kubernetesVersion: ci/${CI_VERSION} + useExperimentalRetryJoin: true \ No newline at end of file diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index abdc933adc1..93c93403c7e 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -78,8 +78,12 @@ providers: targetName: "cluster-template-private.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-ci-version.yaml" targetName: "cluster-template-conformance-ci-artifacts.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-ci-version-windows.yaml" + targetName: "cluster-template-conformance-ci-artifacts-windows.yaml" - sourcePath: "${PWD}/templates/test/dev/cluster-template-custom-builds.yaml" targetName: "cluster-template-conformance-presubmit-artifacts.yaml" + - sourcePath: "${PWD}/templates/test/dev/cluster-template-custom-builds-windows.yaml" + targetName: "cluster-template-conformance-presubmit-artifacts-windows.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-windows.yaml" targetName: "cluster-template-windows.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-machine-pool-windows.yaml" @@ -116,6 +120,7 @@ variables: MULTI_TENANCY_IDENTITY_NAME: "multi-tenancy-identity" CLUSTER_IDENTITY_NAME: "cluster-identity" NODE_DRAIN_TIMEOUT: "60s" + CI_VERSION: "" intervals: default/wait-controllers: ["3m", "10s"] diff --git a/test/e2e/conformance_test.go b/test/e2e/conformance_test.go index f965f35a9a6..3e849a4ffb7 100644 --- a/test/e2e/conformance_test.go +++ b/test/e2e/conformance_test.go @@ -50,6 +50,7 @@ var _ = Describe("Conformance Tests", func() { clusterName string namespace *corev1.Namespace specName = "conformance-tests" + repoList = "" ) BeforeEach(func() { @@ -103,6 +104,8 @@ var _ = Describe("Conformance Tests", func() { flavor := clusterctl.DefaultFlavor if isWindows(kubetestConfigFilePath) { flavor = "windows" + // conformance for windows doesn't require any linux worker machines. 
+ Expect(os.Setenv("LINUX_WORKER_MACHINE_COUNT", "0")).To(Succeed()) + } // clusters with CI artifacts or PR artifacts are based on a known CI version @@ -152,8 +155,8 @@ var _ = Describe("Conformance Tests", func() { b.RecordValue("cluster creation", runtime.Seconds()) workloadProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterName) - // Windows requires a taint on control nodes nodes since not all conformance tests have ability to run if isWindows(kubetestConfigFilePath) { + // Windows requires a taint on control plane nodes since not all conformance tests have the ability to run options := v1.ListOptions{ LabelSelector: "kubernetes.io/os=linux", } @@ -166,6 +169,11 @@ var _ = Describe("Conformance Tests", func() { err := node.TaintNode(workloadProxy.GetClientSet(), options, noScheduleTaint) Expect(err).NotTo(HaveOccurred()) + + // Windows requires a repo-list because some images are not in k8s gcr + repoList, err = resolveKubetestRepoListPath(kubernetesVersion, kubetestRepoListPath) + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "INFO: Using repo-list %s for version %s\n", repoList, kubernetesVersion) } ginkgoNodes, err := strconv.Atoi(e2eConfig.GetVariable("CONFORMANCE_NODES")) @@ -177,7 +185,7 @@ var _ = Describe("Conformance Tests", func() { ClusterProxy: workloadProxy, NumberOfNodes: int(workerMachineCount), ConfigFilePath: kubetestConfigFilePath, - KubeTestRepoListPath: kubetestRepoListPath, + KubeTestRepoListPath: repoList, ConformanceImage: e2eConfig.GetVariable("CONFORMANCE_IMAGE"), GinkgoNodes: ginkgoNodes, }, diff --git a/test/e2e/data/kubetest/repo-list-k8sprow.yaml b/test/e2e/data/kubetest/repo-list-k8sprow.yaml new file mode 100644 index 00000000000..418c112a82c --- /dev/null +++ b/test/e2e/data/kubetest/repo-list-k8sprow.yaml @@ -0,0 +1,8 @@ +# used for k8s versions <= 1.20 https://github.com/kubernetes-sigs/windows-testing/tree/master/images#image-repository-list +dockerLibraryRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images +e2eRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images +promoterE2eRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images +etcdRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images +gcRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images +PrivateRegistry: e2eteam +sampleRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images diff --git a/test/e2e/data/kubetest/repo-list.yaml b/test/e2e/data/kubetest/repo-list.yaml index 8f84224db48..5d48ea223d5 100644 --- a/test/e2e/data/kubetest/repo-list.yaml +++ b/test/e2e/data/kubetest/repo-list.yaml @@ -1,3 +1,7 @@ -gcAuthenticatedRegistry: e2eprivate +# used for 1.21+ https://github.com/kubernetes-sigs/windows-testing/tree/master/images#image-repository-list gcEtcdRegistry: k8sprow.azurecr.io/kubernetes-e2e-test-images -privateRegistry: e2eteam \ No newline at end of file +# There are two other repos that could be set: +# privateRegistry: e2eteam +# - only used in gce tests +# gcAuthenticatedRegistry: e2eprivate +# - needed for OS versions 20H2+; requires an update to the capi framework to accept a docker registry config file \ No newline at end of file diff --git a/test/e2e/data/kubetest/upstream-windows.yaml b/test/e2e/data/kubetest/upstream-windows.yaml index 3ffda2ca23d..5d9b708edd5 100644 --- a/test/e2e/data/kubetest/upstream-windows.yaml +++ b/test/e2e/data/kubetest/upstream-windows.yaml @@ -1,5 +1,5 @@ ginkgo.focus: 
\[Conformance\]|\[NodeConformance\]|\[sig-windows\]|\[sig-apps\].CronJob|\[sig-api-machinery\].ResourceQuota|\[sig-scheduling\].SchedulerPreemption -ginkgo.skip: \[LinuxOnly\]|\[Serial\]|Guestbook.application.should.create.and.stop.a.working.application|device.plugin.for.Windows|Container.Lifecycle.Hook.when.create.a.pod.with.lifecycle.hook.should.execute(.*)http.hook.properly +ginkgo.skip: \[LinuxOnly\]|\[Serial\]|\[Slow\]|Guestbook.application.should.create.and.stop.a.working.application|device.plugin.for.Windows|Container.Lifecycle.Hook.when.create.a.pod.with.lifecycle.hook.should.execute(.*)http.hook.properly disable-log-dump: true ginkgo.progress: true ginkgo.slowSpecThreshold: 120.0 diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 5a38f97a813..e88cdc65a43 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -32,14 +32,10 @@ import ( "testing" "time" - expv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" - - "github.com/Azure/go-autorest/autorest/to" - aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1" "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights" "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/Azure/go-autorest/autorest/to" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" @@ -49,6 +45,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha4" + expv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha4" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" @@ -248,7 +246,7 @@ func init() { flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") flag.StringVar(&kubetestConfigFilePath, "kubetest.config-file", "", "path to the kubetest configuration file") - flag.StringVar(&kubetestRepoListPath, "kubetest.repo-list-file", "", "path to the kubetest repo-list file") + flag.StringVar(&kubetestRepoListPath, "kubetest.repo-list-path", "", "path to the kubetest repo-list path") } func TestE2E(t *testing.T) { diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go index f0ffa5372c9..9b92a1bfca8 100644 --- a/test/e2e/helpers.go +++ b/test/e2e/helpers.go @@ -33,6 +33,7 @@ import ( "text/tabwriter" "time" + "github.com/blang/semver" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" . 
"github.com/onsi/gomega" @@ -576,3 +577,36 @@ func latestCIVersion(label string) (string, error) { return strings.TrimSpace(string(b)), nil } + +// resolveKubetestRepoListPath will set the correct repo list for Windows: +// - if WIN_REPO_URL is set use the custom file downloaded via makefile +// - if CI version is "latest" will use repo-list.yaml +// - if CI version is "latest-1.xx" will compare values and use correct repoList +// - if standard version will compare values and use correct repoList +// - if unable to determine version falls back to using latest +func resolveKubetestRepoListPath(version string, path string) (string, error) { + if _, ok := os.LookupEnv("WIN_REPO_URL"); ok { + return filepath.Join(path, "custom-repo-list.yaml"), nil + } + + if version == "latest" { + return filepath.Join(path, "repo-list.yaml"), nil + } + + version = strings.TrimPrefix(version, "latest-") + currentVersion, err := semver.ParseTolerant(version) + if err != nil { + return "", err + } + + versionCutoff, err := semver.Make("1.21.0") + if err != nil { + return "", err + } + + if currentVersion.LT(versionCutoff) { + return filepath.Join(path, "repo-list-k8sprow.yaml"), nil + } + + return filepath.Join(path, "repo-list.yaml"), nil +}