From 1577deeddf5da96a56f7b70d51de5a783d772b8a Mon Sep 17 00:00:00 2001 From: Allen Ray Date: Thu, 20 Mar 2025 10:07:51 -0400 Subject: [PATCH 1/4] Update to kubernetes 1.32.3 --- go.mod | 22 +++++++++++----------- vendor/modules.txt | 22 +++++++++++----------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 2d32378635ac..8a99116fe2ca 100644 --- a/go.mod +++ b/go.mod @@ -63,20 +63,20 @@ require ( gopkg.in/src-d/go-git.v4 v4.13.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.2 - k8s.io/apiextensions-apiserver v0.32.2 - k8s.io/apimachinery v0.32.2 - k8s.io/apiserver v0.32.2 - k8s.io/cli-runtime v0.32.2 - k8s.io/client-go v0.32.2 - k8s.io/component-base v0.32.2 - k8s.io/component-helpers v0.32.2 + k8s.io/api v0.32.3 + k8s.io/apiextensions-apiserver v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/apiserver v0.32.3 + k8s.io/cli-runtime v0.32.3 + k8s.io/client-go v0.32.3 + k8s.io/component-base v0.32.3 + k8s.io/component-helpers v0.32.3 k8s.io/klog/v2 v2.130.1 - k8s.io/kube-aggregator v0.32.2 + k8s.io/kube-aggregator v0.32.3 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f - k8s.io/kubectl v0.32.2 + k8s.io/kubectl v0.32.3 k8s.io/kubernetes v1.32.2 - k8s.io/pod-security-admission v0.32.2 + k8s.io/pod-security-admission v0.32.3 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/cloud-provider-azure v1.30.4 sigs.k8s.io/kustomize/kyaml v0.18.1 diff --git a/vendor/modules.txt b/vendor/modules.txt index 83715ebdd3ed..bfee566e15f2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1781,7 +1781,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/api v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1842,7 +1842,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/apiextensions-apiserver v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1892,7 +1892,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/apimachinery v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1965,7 +1965,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/apiserver v0.32.3 => 
github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -2148,13 +2148,13 @@ k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/cli-runtime v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/client-go v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -2543,7 +2543,7 @@ k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/component-base v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -2577,7 +2577,7 @@ k8s.io/component-base/version/verflag k8s.io/component-base/zpages/features k8s.io/component-base/zpages/flagz k8s.io/component-base/zpages/statusz -# k8s.io/component-helpers v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/component-helpers v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/apps/poddisruptionbudget @@ -2647,7 +2647,7 @@ k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-aggregator v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/kube-aggregator v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install @@ -2710,7 +2710,7 @@ k8s.io/kube-openapi/pkg/validation/validate ## explicit; go 1.23.0 k8s.io/kube-scheduler/config/v1 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/kubectl v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/util @@ -3583,7 +3583,7 @@ k8s.io/kubernetes/third_party/forked/libcontainer/utils # k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 
1.23.0 k8s.io/mount-utils -# k8s.io/pod-security-admission v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/pod-security-admission v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2 ## explicit; go 1.23.0 k8s.io/pod-security-admission/admission k8s.io/pod-security-admission/admission/api From dc316d1977cb29fe8f25329b792ed0bad1122356 Mon Sep 17 00:00:00 2001 From: Allen Ray Date: Thu, 20 Mar 2025 10:08:06 -0400 Subject: [PATCH 2/4] Update dockerfile --- images/tests/Dockerfile.rhel | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/images/tests/Dockerfile.rhel b/images/tests/Dockerfile.rhel index 2e3cbdbd35dc..e644bfb13771 100644 --- a/images/tests/Dockerfile.rhel +++ b/images/tests/Dockerfile.rhel @@ -20,7 +20,7 @@ RUN PACKAGES="git gzip util-linux" && \ git config --system user.email test@test.com && \ chmod g+w /etc/passwd LABEL io.k8s.display-name="OpenShift End-to-End Tests" \ - io.openshift.release.operator=true \ - io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \ - io.openshift.build.versions="kubernetes-tests=1.32.2" \ - io.openshift.tags="openshift,tests,e2e" + io.openshift.release.operator=true \ + io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \ + io.openshift.build.versions="kubernetes-tests=1.32.3" \ + io.openshift.tags="openshift,tests,e2e" From 6aba4ae811bfc142de8f3df54c63075931f3cb69 Mon Sep 17 00:00:00 2001 From: Allen Ray Date: Thu, 20 Mar 2025 10:09:44 -0400 Subject: [PATCH 3/4] Run hack/update-kube-vendor.sh --- go.mod | 62 +- go.sum | 96 +-- .../apiserver/node/minimum_kubelet_version.go | 80 +++ vendor/k8s.io/api/resource/v1alpha3/types.go | 4 + vendor/k8s.io/api/resource/v1beta1/types.go | 4 + .../plugin/resourcequota/admission.go | 7 +- .../plugin/resourcequota/resource_access.go | 7 +- .../apiserver/pkg/features/kube_features.go | 2 +- .../pkg/util/proxy/streamtranslator.go | 3 +- .../dynamic-resource-allocation/cel/cache.go | 4 +- .../cel/compile.go | 140 +++- .../kube-apiserver/app/options/completion.go | 2 +- .../cmd/kube-apiserver/app/server.go | 4 + .../generated/zz_generated.annotations.go | 652 +++++++++++++++++- .../openshift-hack/e2e/annotate/rules.go | 20 +- ...restrict_extreme_worker_latency_profile.go | 124 ---- .../node/validate_node_config.go | 218 ++++++ .../minimum_kubelet_version.go | 90 +++ .../enablement/intialization.go | 5 + vendor/k8s.io/kubernetes/pkg/api/pod/util.go | 4 +- .../kubernetes/pkg/apis/resource/types.go | 4 + .../controlplane/apiserver/options/options.go | 4 +- .../kubernetes/pkg/features/kube_features.go | 6 + .../pkg/features/openshift_features.go | 5 + .../pkg/features/versioned_kube_features.go | 6 +- .../kubeapiserver/authorizer/modes/patch.go | 3 +- .../pkg/kubeapiserver/authorizer/patch.go | 46 ++ .../pkg/kubeapiserver/authorizer/reload.go | 11 + .../k8s.io/kubernetes/pkg/kubelet/kubelet.go | 6 - .../pkg/kubelet/kubelet_node_status.go | 29 +- .../rbac/bootstrappolicy/controller_policy.go | 24 +- .../test/e2e/apimachinery/namespace.go | 122 ++++ .../kubernetes/test/e2e/feature/feature.go | 4 + .../kubernetes/test/e2e/kubectl/kubectl.go | 10 + .../kubernetes/test/e2e/node/pod_admission.go | 27 +- .../openshift_group_snapshot_driver.go | 285 ++++++++ .../volume_group_snapshot_resource.go | 31 +- 
.../test/e2e/storage/openshift_csi_volumes.go | 45 ++ .../testsuites/volume_group_snapshottable.go | 31 +- .../storage/utils/volume_group_snapshot.go | 7 +- ...age.k8s.io_volumegroupsnapshotclasses.yaml | 4 +- ...ge.k8s.io_volumegroupsnapshotcontents.yaml | 73 +- ...t.storage.k8s.io_volumegroupsnapshots.yaml | 40 +- .../csi-hostpath-plugin.yaml | 6 +- .../run_group_snapshot_e2e.sh | 11 +- .../hostpath/csi-hostpath-plugin.yaml | 2 +- vendor/modules.txt | 112 +-- 47 files changed, 1987 insertions(+), 495 deletions(-) create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/node/minimum_kubelet_version.go delete mode 100644 vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node/restrict_extreme_worker_latency_profile.go create mode 100644 vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node/validate_node_config.go create mode 100644 vendor/k8s.io/kubernetes/openshift-kube-apiserver/authorization/minimumkubeletversion/minimum_kubelet_version.go create mode 100644 vendor/k8s.io/kubernetes/test/e2e/storage/drivers/openshift_group_snapshot_driver.go create mode 100644 vendor/k8s.io/kubernetes/test/e2e/storage/openshift_csi_volumes.go diff --git a/go.mod b/go.mod index 8a99116fe2ca..40e1d0063a19 100644 --- a/go.mod +++ b/go.mod @@ -314,37 +314,37 @@ require ( replace ( github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 - k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2 - k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250220043805-86db063ce6f2 - k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2 - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2 - k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2 - k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2 - k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250220043805-86db063ce6f2 - k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250220043805-86db063ce6f2 - k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250220043805-86db063ce6f2 - k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2 - k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2 - k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250220043805-86db063ce6f2 - k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250220043805-86db063ce6f2 - k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250220043805-86db063ce6f2 - k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250220043805-86db063ce6f2 - k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250220043805-86db063ce6f2 - k8s.io/endpointslice => 
github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250220043805-86db063ce6f2 - k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250220043805-86db063ce6f2 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250220043805-86db063ce6f2 - k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250220043805-86db063ce6f2 - k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250220043805-86db063ce6f2 - k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2 - k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250220043805-86db063ce6f2 - k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250220043805-86db063ce6f2 - k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250220043805-86db063ce6f2 + k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250320083338-1601b9e27d85 + k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250320083338-1601b9e27d85 + k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250320083338-1601b9e27d85 + k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250320083338-1601b9e27d85 + k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250320083338-1601b9e27d85 + k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250320083338-1601b9e27d85 + k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250320083338-1601b9e27d85 + k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250320083338-1601b9e27d85 + k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250320083338-1601b9e27d85 + k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250320083338-1601b9e27d85 + k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250320083338-1601b9e27d85 + k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250320083338-1601b9e27d85 + k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250320083338-1601b9e27d85 + k8s.io/cri-client => 
github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250320083338-1601b9e27d85 + k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250320083338-1601b9e27d85 + k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250320083338-1601b9e27d85 + k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250320083338-1601b9e27d85 + k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250320083338-1601b9e27d85 + k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250320083338-1601b9e27d85 + k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250320083338-1601b9e27d85 + k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250320083338-1601b9e27d85 + k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250320083338-1601b9e27d85 + k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250320083338-1601b9e27d85 + k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250320083338-1601b9e27d85 + k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250320083338-1601b9e27d85 ) // The cadvisor version used in k8s v1.32.1 (v0.51.0) relies on code present on this version diff --git a/go.sum b/go.sum index 3bfd982d513b..1be18f27db59 100644 --- a/go.sum +++ b/go.sum @@ -605,54 +605,54 @@ github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a h1:duO3JMrUOqV github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a/go.mod h1:Qw3ThpzVZ0bfTILpBNYg4LGyjtNxfyCiGh/uDLOOTP8= github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb h1:Dr0dbSQTAU9UaoAvimGjR+fsvwx2twJ5KR0s/jyAz88= github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb/go.mod h1:LnhqxbWhAnhPwilJ4yX1/ly7wCMCYJKkaiSJQSh+Wjg= -github.com/openshift/kubernetes v1.30.1-0.20250220043805-86db063ce6f2 h1:fKtY84wQmMyG4ncV3mcoK3EMKf8ks5QaPbmfxPuKT8w= -github.com/openshift/kubernetes v1.30.1-0.20250220043805-86db063ce6f2/go.mod h1:Efh/f6Fm21RgDa88xImz+MPatPfcsoQW6GcWkWXWEEk= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2 h1:Iz5VoilPHi6Ovc2LCnz1zKePAuI6PJe+aSbHxYeoftE= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2/go.mod h1:PXATPrizf40XG7caGH9r9qdYgUAJBqDa/s6i2hDU1Us= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver 
v0.0.0-20250220043805-86db063ce6f2 h1:9cHwAGg47vgSO8PkP+UCo4NIgcYRK3f9ir0dX/zmqjE= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250220043805-86db063ce6f2/go.mod h1:E8q0Qr0CEJ5WyjJhMaeP0mQTssLTVJzW2EwJ3CwXd2s= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2 h1:OQAukFrCdQ6A8c/2AomfU1YCXcYn3mDfeSl3q5yz+go= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2/go.mod h1:D9TPIbbk/g9FXxVr7G9GQBxEEgSf2Zb4ISLuwJKaoOg= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2 h1:6MbLJ0BcAq7yNv6Fwfq1OP/DjWP+qGA4i1nYHHMkCSE= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2/go.mod h1:pK4RgwS+SKk1OrkSFsmPPgtWCURPeqOpQ/DmWZ6kKss= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2 h1:go4mifYV19X5mcwo+eJRHYwrcmEmY0NiPjadSLvMVuk= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2/go.mod h1:62aNvJLPzLZy7i3OFOCZEi7jZq/Qgu20g9o1gFrHOeo= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2 h1:zihM7Ph+S97Ko6LA2UvuyHXsP4/abuhnfSnUHcfwDXk= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2/go.mod h1:osLkL6Y05PrvsnN8Y4VvF7WhT8Rhltj8n3si6gsfa8I= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250220043805-86db063ce6f2 h1:HGlmPykZJZ9RwFr+S0HvRP20j4k9TWLUxHKXaWbAgr0= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250220043805-86db063ce6f2/go.mod h1:1Y0C45cj5HUz6T2zCd/rbsFKVVUtxBWB1WJc3ksgino= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250220043805-86db063ce6f2 h1:qTb3cTKIJE6rgKdqcRmivwqR/l4foQ1exo/Jmqs6Dwc= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250220043805-86db063ce6f2/go.mod h1:ZC4TOTM6y6/3o5l0X0UAj/xYI4FhhW2X1kf4H7aym+4= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2 h1:w3UK+vc6nAmEA1bkBsSkqxKXeLDW6lR858KQWxMOjoY= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2/go.mod h1:CeSMAQiDPBLz3tybbU3C4F6UWmNE+MmaTLas/4vd5IE= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2 h1:r7b1ilP4x0JNVOezW4/c2NgBkdmc92Apn3jqVgk+YsQ= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2/go.mod h1:BqZv1ycl+iUCtUH94v2/bw29nXFoA9ZxU5HiCld5UuM= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250220043805-86db063ce6f2 h1:9ZDSYIKjRVoXxOdD2/O78D0GgI2lVifP0+cYPeguq5M= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250220043805-86db063ce6f2/go.mod h1:G2ofVSclHMBjE4px9vYy5N1b8103cQrKpIAwkHIQu4g= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250220043805-86db063ce6f2 h1:XXgTP8FOopr+TPyHVW/cbGEAZeEmXfNfR9f/NFoqqXE= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250220043805-86db063ce6f2/go.mod h1:g5RiEG3qLdo0/6ULdxWcfVjtDlJlxfmEvk8HnvVKgI8= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250220043805-86db063ce6f2 h1:oNUQtvs8YOP8vkM/mkXcXCN6mIYq4lNij3VLyfEvQ+g= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250220043805-86db063ce6f2/go.mod 
h1:oQJlDoAmJUdRvVPlROBdBIfceFKY2TRjK0ah+6I8jAI= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250220043805-86db063ce6f2 h1:SiBDL8OvI1d8LGKGhsY7p2DZ9bPb2xd6syjXpslrnJw= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250220043805-86db063ce6f2/go.mod h1:oGwCVeWGOp5ydzljG3HwHeYSTg0AkU8AVVI6WA7p/NQ= -github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250220043805-86db063ce6f2 h1:b73UaBuGWUb5RFbNhw/u5P6FRs9uaDxb+2R45MyurCk= -github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250220043805-86db063ce6f2/go.mod h1:lHc2r9QqTP5jQ/qzHRjEjvq+P2pMIJxH3pW2OIeKGm4= -github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250220043805-86db063ce6f2 h1:pXgKZk4Z4dMczqpDk9colTnGoJ8oNRcELFXSNv9/GCc= -github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250220043805-86db063ce6f2/go.mod h1:cYoh1BjUit+0mmm1QFiHHIvFRp5NuJ/PtXFjqU6lG2I= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2 h1:0GajQEf0NKWMeZ6LRV5bxZU1V/jgEx/HwOkZqXQlDnA= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2/go.mod h1:0G+pIl5ZSz2OJTbaNrYgYDas3AAuLuyb6f7yp67vUc0= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250220043805-86db063ce6f2 h1:fWFsslK1TQe/L2Bm/YAqqaDccCouvyOROJVZSEZFcvY= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250220043805-86db063ce6f2/go.mod h1:g8ueMAVzUqNpdqLNTOC39CTx0uw8xOA8Rr6YYN9Csmc= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2 h1:Xd7px85GY01bxhIT5VwQI1LnnhvzeOkhdwEpi/Vy6oA= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2/go.mod h1:eUOhccfuBuDXD7CuDtMfg3phiyI+FMtOQV60/SOPw1k= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250220043805-86db063ce6f2 h1:deB06vjw/sco41VyK+AiJtBW0VIfdwyzcZx1cWIBVS4= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250220043805-86db063ce6f2/go.mod h1:KDLFmWxF6aaZOjAfLBQa6WKAgolILaDhFOHAVXhtPkI= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250220043805-86db063ce6f2 h1:XpzB6+S0/fTpks2lZ2cevAk724qIPkzewsJ7C4WTTOs= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250220043805-86db063ce6f2/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0= -github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2 h1:5LFI+YKtrPyZzX9KizXNDNAI6ffFddqO45+lA0xAeXU= -github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2/go.mod h1:mwfS00zLOJUpOmV4dAXY/mstuDnRfVI+094R5F41K00= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250220043805-86db063ce6f2 h1:AbiESeMgTdy4+SbunwjSJyh8NRM8kIYKfVFED2iO8ys= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250220043805-86db063ce6f2/go.mod h1:owAehkeYLq2tHccss/uBNAaqS7UtrFlgkK40soLfHLc= +github.com/openshift/kubernetes v1.30.1-0.20250320083338-1601b9e27d85 h1:GGa1y6SWhlKjw9Utv+Q5bNH4dEk5ArVFRwaqlYsNS2Q= +github.com/openshift/kubernetes v1.30.1-0.20250320083338-1601b9e27d85/go.mod h1:kYKiuMo4pAcEkOTRTzAzJeogeK5+HBuvT7zGZoHleG4= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250320083338-1601b9e27d85 h1:kfaz5mEtKZPKZv/775D7p7M9ghbMcdYfglSAcbckZBo= 
+github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250320083338-1601b9e27d85/go.mod h1:PXATPrizf40XG7caGH9r9qdYgUAJBqDa/s6i2hDU1Us= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250320083338-1601b9e27d85 h1:KMW/WJPmLcGues6kP9hcOKeYzw60AyKfge54ULBl54g= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250320083338-1601b9e27d85/go.mod h1:y79okk+2JwH3UYZXNtHla4F5wRvwM0a10bXWeOWqHXQ= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250320083338-1601b9e27d85 h1:hWDkMAEkG7n7MWQwvaFbBcYOCrzcszLqt4QZnAxJF4k= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250320083338-1601b9e27d85/go.mod h1:D9TPIbbk/g9FXxVr7G9GQBxEEgSf2Zb4ISLuwJKaoOg= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250320083338-1601b9e27d85 h1:FDYhbSifxW3sv14zJIlli98WI2H9jsZucA3tb5aQxm4= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250320083338-1601b9e27d85/go.mod h1:pK4RgwS+SKk1OrkSFsmPPgtWCURPeqOpQ/DmWZ6kKss= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250320083338-1601b9e27d85 h1:qkRBfXQ7YPH1ejE10oE8F6CvarIXMySiRdGbxQHi2rQ= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250320083338-1601b9e27d85/go.mod h1:62aNvJLPzLZy7i3OFOCZEi7jZq/Qgu20g9o1gFrHOeo= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250320083338-1601b9e27d85 h1:5Xz0hBQkb0z1fV11UTyIr9wcA1cmxJrnOXHudMkZsrU= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250320083338-1601b9e27d85/go.mod h1:osLkL6Y05PrvsnN8Y4VvF7WhT8Rhltj8n3si6gsfa8I= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250320083338-1601b9e27d85 h1:EfUoujeJ9qZEhclTcWWLWa9Tyn6znKpSj4GAi37MkTU= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250320083338-1601b9e27d85/go.mod h1:1Y0C45cj5HUz6T2zCd/rbsFKVVUtxBWB1WJc3ksgino= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250320083338-1601b9e27d85 h1:nQqREVSDSFkIRmuMYomi2aYpNfL7DJz5DYq/S31hNZw= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250320083338-1601b9e27d85/go.mod h1:ZC4TOTM6y6/3o5l0X0UAj/xYI4FhhW2X1kf4H7aym+4= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250320083338-1601b9e27d85 h1:SqInBPd+clyW3JpxF8M561teaiUXyb+Orv8G3fVJHu4= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250320083338-1601b9e27d85/go.mod h1:CeSMAQiDPBLz3tybbU3C4F6UWmNE+MmaTLas/4vd5IE= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250320083338-1601b9e27d85 h1:3RZqHaxf0oZcgspDxCpm0ZgY+RThq7TOdnEfxJg3YoM= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250320083338-1601b9e27d85/go.mod h1:BqZv1ycl+iUCtUH94v2/bw29nXFoA9ZxU5HiCld5UuM= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250320083338-1601b9e27d85 h1:Cr6xFONau0MEdUSzljOJCupk/1S84aHRNlqpE04GII8= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250320083338-1601b9e27d85/go.mod h1:G2ofVSclHMBjE4px9vYy5N1b8103cQrKpIAwkHIQu4g= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250320083338-1601b9e27d85 h1:hn5h9pOszpINf3ZUeAidC/kb+gxB02tqRnrzNRwPvXI= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250320083338-1601b9e27d85/go.mod h1:g5RiEG3qLdo0/6ULdxWcfVjtDlJlxfmEvk8HnvVKgI8= 
+github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250320083338-1601b9e27d85 h1:rezfmUGf1Sj9Lmz7Q1MsQIWcNxBNPTvrtleicSQuFXQ= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250320083338-1601b9e27d85/go.mod h1:oQJlDoAmJUdRvVPlROBdBIfceFKY2TRjK0ah+6I8jAI= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250320083338-1601b9e27d85 h1:k4eO2YBKbTeG0MySQ3b2bLLDCy1Lw7MIyxjNZfUdK5M= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250320083338-1601b9e27d85/go.mod h1:oGwCVeWGOp5ydzljG3HwHeYSTg0AkU8AVVI6WA7p/NQ= +github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250320083338-1601b9e27d85 h1:omio1ScjnOTNqzMmY2ez6Qp7pmxHbD+fhfLnL15Usc0= +github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250320083338-1601b9e27d85/go.mod h1:lHc2r9QqTP5jQ/qzHRjEjvq+P2pMIJxH3pW2OIeKGm4= +github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250320083338-1601b9e27d85 h1:O7c5bEDEOa0J8RL3LufaiCMPGN3liP9izmXfwc4WyUg= +github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250320083338-1601b9e27d85/go.mod h1:cYoh1BjUit+0mmm1QFiHHIvFRp5NuJ/PtXFjqU6lG2I= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250320083338-1601b9e27d85 h1:O/XgqIVajX2ykcSmYCHIp0cpdXBra1LOwG468pb8xHM= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250320083338-1601b9e27d85/go.mod h1:0G+pIl5ZSz2OJTbaNrYgYDas3AAuLuyb6f7yp67vUc0= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250320083338-1601b9e27d85 h1:vMjBGugHVJS5k01poMESR7Ms2EeQtnWmHLGXEBVuHNM= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250320083338-1601b9e27d85/go.mod h1:g8ueMAVzUqNpdqLNTOC39CTx0uw8xOA8Rr6YYN9Csmc= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250320083338-1601b9e27d85 h1:HwVTbBCjkjW0RC+bvLqnQ2JmqHRCvWu0BvlLHourLZo= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250320083338-1601b9e27d85/go.mod h1:eUOhccfuBuDXD7CuDtMfg3phiyI+FMtOQV60/SOPw1k= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250320083338-1601b9e27d85 h1:+pWgwTCic60tr3fiGJgnxXEXNmV/7xD2JwsxESPrBQU= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250320083338-1601b9e27d85/go.mod h1:KDLFmWxF6aaZOjAfLBQa6WKAgolILaDhFOHAVXhtPkI= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250320083338-1601b9e27d85 h1:FZPHLQt8Lq+qND1lvXHN30BCQtx7emC0d7Imp+pYSyI= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250320083338-1601b9e27d85/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250320083338-1601b9e27d85 h1:MZcpICTeQE+wEfe19XjQj9TnTROW6+DucCV0h3T0Z9I= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250320083338-1601b9e27d85/go.mod h1:mwfS00zLOJUpOmV4dAXY/mstuDnRfVI+094R5F41K00= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250320083338-1601b9e27d85 h1:FHkpX5RnBovG459kCHC7IJwD0zJmmiP80DApUmaJrxY= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250320083338-1601b9e27d85/go.mod h1:owAehkeYLq2tHccss/uBNAaqS7UtrFlgkK40soLfHLc= github.com/openshift/library-go v0.0.0-20250129210218-fe56c2cf5d70 h1:VLj8CU9q009xlMuR4wNcqDX4lVa2Ji3u/iYnBLHtQUc= github.com/openshift/library-go 
v0.0.0-20250129210218-fe56c2cf5d70/go.mod h1:TQx0VEhZ/92qRXIMDu2Wg4bUPmw5HRNE6wpSZ+IsP0Y= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI= diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/node/minimum_kubelet_version.go b/vendor/github.com/openshift/library-go/pkg/apiserver/node/minimum_kubelet_version.go new file mode 100644 index 000000000000..208adb815a46 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/node/minimum_kubelet_version.go @@ -0,0 +1,80 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/blang/semver/v4" + + corev1 "k8s.io/api/core/v1" +) + +// An error to be returned when kubelet version is lower than specified minimum kubelet version. +// Used to differentiate between a parsing failure, and a failure because the kubelet is out of date. +var ErrKubeletOutdated = fmt.Errorf("kubelet version is outdated") + +// ValidateMinimumKubeletVersion takes a list of nodes and a currently set min version. +// It parses the min version and iterates through the nodes, comparing the version of the kubelets +// to the min version. +// It will error if any nodes are older than the min version. +func ValidateMinimumKubeletVersion(nodes []*corev1.Node, minimumKubeletVersion string) error { + // unset, no error + if minimumKubeletVersion == "" { + return nil + } + + version, err := semver.Parse(minimumKubeletVersion) + if err != nil { + return fmt.Errorf("failed to parse submitted version %s %v", minimumKubeletVersion, err.Error()) + } + + for _, node := range nodes { + if err := IsNodeTooOld(node, &version); err != nil { + return err + } + } + return nil +} + +// IsNodeTooOld answers that very question. It takes a node object and a minVersion, +// parses each into a semver version, and then determines whether the version of the kubelet on the +// node is older than min version. +// When the node is too old, it returns the error ErrKubeletOutdated. If a different error occurs, an error is returned. +// If the node is new enough and no error happens, nil is returned. +func IsNodeTooOld(node *corev1.Node, minVersion *semver.Version) error { + return IsKubeletVersionTooOld(node.Status.NodeInfo.KubeletVersion, minVersion) +} + +// IsKubeletVerisionTooOld answers that very question. It takes a kubelet version and a minVersion, +// parses each into a semver version, and then determines whether the version of the kubelet on the +// node is older than min version. +// It will fail if the minVersion is nil, if the kubeletVersion is invalid, or if the minVersion is greater than +// the kubeletVersion +// When the kubelet is too old, it returns the error ErrKubeletOutdated. If a different error occurs, an error is returned. +// If the node is new enough and no error happens, nil is returned. +func IsKubeletVersionTooOld(kubeletVersion string, minVersion *semver.Version) error { + if minVersion == nil { + return fmt.Errorf("given minimum version is nil") + } + version, err := ParseKubeletVersion(kubeletVersion) + if err != nil { + return fmt.Errorf("failed to parse node version %s: %v", kubeletVersion, err) + } + if minVersion.GT(*version) { + return fmt.Errorf("%w: kubelet version is %v, which is lower than minimumKubeletVersion of %v", ErrKubeletOutdated, *version, *minVersion) + } + return nil +} + +// ParseKubeletVersion parses it into a semver.Version object, stripping +// any information in the version that isn't "major.minor.patch". 
+func ParseKubeletVersion(kubeletVersion string) (*semver.Version, error) { + version, err := semver.Parse(strings.TrimPrefix(kubeletVersion, "v")) + if err != nil { + return nil, err + } + + version.Pre = nil + version.Build = nil + return &version, nil +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index fb4d7041dbdf..49d7c86de317 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -145,6 +145,10 @@ type ResourceSliceSpec struct { Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` } +// DriverNameMaxLength is the maximum valid length of a driver name in the +// ResourceSliceSpec and other places. It's the same as for CSI driver names. +const DriverNameMaxLength = 63 + // ResourcePool describes the pool that ResourceSlices belong to. type ResourcePool struct { // Name is used to identify the pool. For node-local devices, this diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go index ca79c5a66403..fbdc35ca86b2 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types.go +++ b/vendor/k8s.io/api/resource/v1beta1/types.go @@ -144,6 +144,10 @@ type ResourceSliceSpec struct { Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` } +// DriverNameMaxLength is the maximum valid length of a driver name in the +// ResourceSliceSpec and other places. It's the same as for CSI driver names. +const DriverNameMaxLength = 63 + // ResourcePool describes the pool that ResourceSlices belong to. type ResourcePool struct { // Name is used to identify the pool. For node-local devices, this diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go index e7d59bb707da..5455b414eda6 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go @@ -114,7 +114,9 @@ func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) { // SetExternalKubeInformerFactory registers an informer factory into QuotaAdmission func (a *QuotaAdmission) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { - a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister() + quotas := f.Core().V1().ResourceQuotas() + a.quotaAccessor.lister = quotas.Lister() + a.quotaAccessor.hasSynced = quotas.Informer().HasSynced } // SetQuotaConfiguration assigns and initializes configuration and evaluator for QuotaAdmission @@ -144,6 +146,9 @@ func (a *QuotaAdmission) ValidateInitialization() error { if a.quotaAccessor.lister == nil { return fmt.Errorf("missing quotaAccessor.lister") } + if a.quotaAccessor.hasSynced == nil { + return fmt.Errorf("missing quotaAccessor.hasSynced") + } if a.quotaConfiguration == nil { return fmt.Errorf("missing quotaConfiguration") } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go index d189446f032a..fd4c102e6d52 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go @@ -48,6 +48,9 @@ type quotaAccessor struct { // lister can list/get quota objects from a shared informer's cache lister corev1listers.ResourceQuotaLister + // hasSynced indicates whether the lister 
has completed its initial sync + hasSynced func() bool + // liveLookups holds the last few live lookups we've done to help ammortize cost on repeated lookup failures. // This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results. // We track the lookup result here so that for repeated requests, we don't look it up very often. @@ -112,8 +115,8 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, err return nil, fmt.Errorf("error resolving quota: %v", err) } - // if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it. - if len(items) == 0 { + // if there are no items held in our unsynced lister, check our live-lookup LRU, if that misses, do the live lookup to prime it. + if len(items) == 0 && !e.hasSynced() { lruItemObj, ok := e.liveLookupCache.Get(namespace) if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) { // use singleflight.Group to avoid flooding the apiserver with repeated diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index bbba688fbb80..c23343346e46 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -371,7 +371,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate }, ResilientWatchCacheInitialization: { - {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, }, RetryGenerateName: { diff --git a/vendor/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go b/vendor/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go index 6dabc1c7b4ab..6593a3ed9667 100644 --- a/vendor/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go +++ b/vendor/k8s.io/apiserver/pkg/util/proxy/streamtranslator.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "strconv" + "time" "github.com/mxk/go-flowrate/flowrate" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -70,7 +71,7 @@ func (h *StreamTranslatorHandler) ServeHTTP(w http.ResponseWriter, req *http.Req defer websocketStreams.conn.Close() // Creating SPDY executor, ensuring redirects are not followed. - spdyRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{UpgradeTransport: h.Transport}) + spdyRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{UpgradeTransport: h.Transport, PingPeriod: 5 * time.Second}) if err != nil { websocketStreams.writeStatus(apierrors.NewInternalError(err)) //nolint:errcheck metrics.IncStreamTranslatorRequest(req.Context(), strconv.Itoa(http.StatusInternalServerError)) diff --git a/vendor/k8s.io/dynamic-resource-allocation/cel/cache.go b/vendor/k8s.io/dynamic-resource-allocation/cel/cache.go index 2868886c5bbd..e807ba2b9ec6 100644 --- a/vendor/k8s.io/dynamic-resource-allocation/cel/cache.go +++ b/vendor/k8s.io/dynamic-resource-allocation/cel/cache.go @@ -43,6 +43,8 @@ func NewCache(maxCacheEntries int) *Cache { // GetOrCompile checks whether the cache already has a compilation result // and returns that if available. Otherwise it compiles, stores successful // results and returns the new result. +// +// Cost estimation is disabled. func (c *Cache) GetOrCompile(expression string) CompilationResult { // Compiling a CEL expression is expensive enough that it is cheaper // to lock a mutex than doing it several times in parallel. 
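As an aside on the cache API this file exposes: GetOrCompile is the whole public surface, and after this change every compilation made through it runs with cost estimation turned off. A minimal usage sketch, assuming the k8s.io/dynamic-resource-allocation/cel package as patched here; the cache size and selector expression are illustrative, not from this patch:

    package main

    import (
        "fmt"

        "k8s.io/dynamic-resource-allocation/cel"
    )

    func main() {
        // LRU cache keyed by the expression text; 16 entries is an arbitrary choice.
        cache := cel.NewCache(16)

        // The first call compiles (with cost estimation disabled, per this
        // patch); later calls with the same expression return the cached result.
        result := cache.GetOrCompile(`device.driver == "dra.example.com"`)
        if result.Error != nil {
            fmt.Println("compilation failed:", result.Error)
            return
        }
        fmt.Println("compiled; MaxCost =", result.MaxCost)
    }

Note that only successful results are stored (the next hunk guards c.add with expr.Error == nil), so an invalid expression is recompiled on every call.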
@@ -55,7 +57,7 @@ func (c *Cache) GetOrCompile(expression string) CompilationResult { return *cached } - expr := GetCompiler().CompileCELExpression(expression, Options{}) + expr := GetCompiler().CompileCELExpression(expression, Options{DisableCostEstimation: true}) if expr.Error == nil { c.add(expression, &expr) } diff --git a/vendor/k8s.io/dynamic-resource-allocation/cel/compile.go b/vendor/k8s.io/dynamic-resource-allocation/cel/compile.go index 9ad4bed0f675..d59f7d7d4bb4 100644 --- a/vendor/k8s.io/dynamic-resource-allocation/cel/compile.go +++ b/vendor/k8s.io/dynamic-resource-allocation/cel/compile.go @@ -20,12 +20,14 @@ import ( "context" "errors" "fmt" + "math" "reflect" "strings" "sync" "github.com/blang/semver/v4" "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" @@ -50,6 +52,23 @@ const ( var ( lazyCompilerInit sync.Once lazyCompiler *compiler + + // A variant of AnyType = https://github.com/kubernetes/kubernetes/blob/ec2e0de35a298363872897e5904501b029817af3/staging/src/k8s.io/apiserver/pkg/cel/types.go#L550: + // unknown actual type (could be bool, int, string, etc.) but with a known maximum size. + attributeType = withMaxElements(apiservercel.AnyType, resourceapi.DeviceAttributeMaxValueLength) + + // Other strings also have a known maximum size. + domainType = withMaxElements(apiservercel.StringType, resourceapi.DeviceMaxDomainLength) + idType = withMaxElements(apiservercel.StringType, resourceapi.DeviceMaxIDLength) + driverType = withMaxElements(apiservercel.StringType, resourceapi.DriverNameMaxLength) + + // Each map is bound by the maximum number of different attributes. + innerAttributesMapType = apiservercel.NewMapType(idType, attributeType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice) + outerAttributesMapType = apiservercel.NewMapType(domainType, innerAttributesMapType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice) + + // Same for capacity. + innerCapacityMapType = apiservercel.NewMapType(idType, apiservercel.QuantityDeclType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice) + outerCapacityMapType = apiservercel.NewMapType(domainType, innerCapacityMapType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice) ) func GetCompiler() *compiler { @@ -85,11 +104,12 @@ type Device struct { } type compiler struct { - envset *environment.EnvSet -} - -func newCompiler() *compiler { - return &compiler{envset: mustBuildEnv()} + // deviceType is a definition for the type of the `device` variable. + // This is needed for the cost estimator. Both are currently version-independent. + // If that ever changes, some additional logic might be needed to make + // cost estimates version-dependent. + deviceType *apiservercel.DeclType + envset *environment.EnvSet } // Options contains several additional parameters @@ -101,6 +121,10 @@ type Options struct { // CostLimit allows overriding the default runtime cost limit [resourceapi.CELSelectorExpressionMaxCost]. CostLimit *uint64 + + // DisableCostEstimation can be set to skip estimating the worst-case CEL cost. + // If disabled or after an error, [CompilationResult.MaxCost] will be set to [math.Uint64]. + DisableCostEstimation bool } // CompileCELExpression returns a compiled CEL expression. It evaluates to bool. 
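To make the new DisableCostEstimation knob concrete before the next hunk, here is a hedged sketch of calling the compiler directly, using only names visible in the diff above. Per this patch, MaxCost is initialized to math.MaxUint64 and only overwritten when estimation actually runs; the expression is illustrative:

    package main

    import (
        "fmt"
        "math"

        "k8s.io/dynamic-resource-allocation/cel"
    )

    func main() {
        expr := `device.driver == "dra.example.com"` // illustrative selector

        // Default path: the worst-case cost is estimated and recorded in MaxCost.
        estimated := cel.GetCompiler().CompileCELExpression(expr, cel.Options{})

        // New path: estimation is skipped and MaxCost stays at the sentinel
        // math.MaxUint64, matching what the patched cache now does.
        skipped := cel.GetCompiler().CompileCELExpression(expr, cel.Options{DisableCostEstimation: true})

        if estimated.Error != nil || skipped.Error != nil {
            fmt.Println("compile error:", estimated.Error, skipped.Error)
            return
        }
        fmt.Println("estimated MaxCost:", estimated.MaxCost)
        fmt.Println("estimation skipped:", skipped.MaxCost == math.MaxUint64) // true
    }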
@@ -114,6 +138,7 @@ func (c compiler) CompileCELExpression(expression string, options Options) Compi Detail: errorString, }, Expression: expression, + MaxCost: math.MaxUint64, } } @@ -122,10 +147,6 @@ func (c compiler) CompileCELExpression(expression string, options Options) Compi return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) } - // We don't have a SizeEstimator. The potential size of the input (= a - // device) is already declared in the definition of the environment. - estimator := &library.CostEstimator{} - ast, issues := env.Compile(expression) if issues != nil { return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) @@ -157,18 +178,28 @@ func (c compiler) CompileCELExpression(expression string, options Options) Compi OutputType: ast.OutputType(), Environment: env, emptyMapVal: env.CELTypeAdapter().NativeToValue(map[string]any{}), + MaxCost: math.MaxUint64, } - costEst, err := env.EstimateCost(ast, estimator) - if err != nil { - compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "cost estimation failed: " + err.Error()} - return compilationResult + if !options.DisableCostEstimation { + // We don't have a SizeEstimator. The potential size of the input (= a + // device) is already declared in the definition of the environment. + estimator := c.newCostEstimator() + costEst, err := env.EstimateCost(ast, estimator) + if err != nil { + compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "cost estimation failed: " + err.Error()} + return compilationResult + } + compilationResult.MaxCost = costEst.Max } - compilationResult.MaxCost = costEst.Max return compilationResult } +func (c *compiler) newCostEstimator() *library.CostEstimator { + return &library.CostEstimator{SizeEstimator: &sizeEstimator{compiler: c}} +} + // getAttributeValue returns the native representation of the one value that // should be stored in the attribute, otherwise an error. An error is // also returned when there is no supported value. 
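Rounding out the flow, a compiled result is evaluated against a device through DeviceMatches, whose context appears in the next hunk. The sketch below assumes the upstream 1.32 shape of the Device input (a Driver string plus Attributes/Capacity maps keyed by resourceapi.QualifiedName, here taken from k8s.io/api/resource/v1beta1); the driver name and "model" attribute are invented for illustration:

    package main

    import (
        "context"
        "fmt"

        resourceapi "k8s.io/api/resource/v1beta1"
        "k8s.io/dynamic-resource-allocation/cel"
    )

    func main() {
        result := cel.GetCompiler().CompileCELExpression(
            `device.attributes["dra.example.com"].model == "fast"`,
            cel.Options{},
        )
        if result.Error != nil {
            fmt.Println("compile error:", result.Error)
            return
        }

        model := "fast"
        input := cel.Device{
            Driver: "dra.example.com", // assumed field names, per the upstream Device type
            Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
                "model": {StringValue: &model},
            },
        }

        matches, _, err := result.DeviceMatches(context.Background(), input)
        fmt.Println("matches:", matches, "err:", err)
    }

Unprefixed attribute IDs such as "model" are resolved against the driver's domain (parseQualifiedName, further down in this file, applies the default domain), which is why the attribute is reachable as device.attributes["dra.example.com"].model.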
@@ -241,7 +272,7 @@ func (c CompilationResult) DeviceMatches(ctx context.Context, input Device) (boo return resultBool, details, nil } -func mustBuildEnv() *environment.EnvSet { +func newCompiler() *compiler { envset := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true /* strictCost */) field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { return apiservercel.NewDeclField(name, declType, required, nil, nil) @@ -253,10 +284,11 @@ func mustBuildEnv() *environment.EnvSet { } return result } + deviceType := apiservercel.NewObjectType("kubernetes.DRADevice", fields( - field(driverVar, apiservercel.StringType, true), - field(attributesVar, apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewMapType(apiservercel.StringType, apiservercel.AnyType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), true), - field(capacityVar, apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewMapType(apiservercel.StringType, apiservercel.QuantityDeclType, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice), true), + field(driverVar, driverType, true), + field(attributesVar, outerAttributesMapType, true), + field(capacityVar, outerCapacityMapType, true), )) versioned := []environment.VersionedOptions{ @@ -284,7 +316,13 @@ func mustBuildEnv() *environment.EnvSet { if err != nil { panic(fmt.Errorf("internal error building CEL environment: %w", err)) } - return envset + return &compiler{envset: envset, deviceType: deviceType} +} + +func withMaxElements(in *apiservercel.DeclType, maxElements uint64) *apiservercel.DeclType { + out := *in + out.MaxElements = int64(maxElements) + return &out } // parseQualifiedName splits into domain and identified, using the default domain @@ -322,3 +360,67 @@ func (m mapper) Find(key ref.Val) (ref.Val, bool) { return m.defaultValue, true } + +// sizeEstimator tells the cost estimator the maximum size of maps or strings accessible through the `device` variable. +// Without this, the maximum string size of e.g. `device.attributes["dra.example.com"].services` would be unknown. +// +// sizeEstimator is derived from the sizeEstimator in k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel. +type sizeEstimator struct { + compiler *compiler +} + +func (s *sizeEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + path := element.Path() + if len(path) == 0 { + // Path() can return an empty list, early exit if it does since we can't + // provide size estimates when that happens + return nil + } + + // The estimator provides information about the environment's variable(s). + var currentNode *apiservercel.DeclType + switch path[0] { + case deviceVar: + currentNode = s.compiler.deviceType + default: + // Unknown root, shouldn't happen. + return nil + } + + // Cut off initial variable from path, it was checked above. + for _, name := range path[1:] { + switch name { + case "@items", "@values": + if currentNode.ElemType == nil { + return nil + } + currentNode = currentNode.ElemType + case "@keys": + if currentNode.KeyType == nil { + return nil + } + currentNode = currentNode.KeyType + default: + field, ok := currentNode.Fields[name] + if !ok { + // If this is an attribute map, then we know that all elements + // have the same maximum size as set in attributeType, regardless + // of their name. 
+ if currentNode.ElemType == attributeType { + currentNode = attributeType + continue + } + return nil + } + if field.Type == nil { + return nil + } + currentNode = field.Type + } + } + return &checker.SizeEstimate{Min: 0, Max: uint64(currentNode.MaxElements)} +} + +func (s *sizeEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + return nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/completion.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/completion.go index 0b83e79a9edd..89e2dbf9e415 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/completion.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/completion.go @@ -57,7 +57,7 @@ func (s *ServerRunOptions) Complete(ctx context.Context) (CompletedOptions, erro if err != nil { return CompletedOptions{}, err } - controlplane, err := s.Options.Complete(ctx, s.Flags(), []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}, []net.IP{apiServerServiceIP}) + controlplane, err := s.Options.Complete(ctx, []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}, []net.IP{apiServerServiceIP}) if err != nil { return CompletedOptions{}, err } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index ae2259c26c38..2b024e0c49bb 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -52,6 +52,7 @@ import ( "k8s.io/component-base/term" utilversion "k8s.io/component-base/version" "k8s.io/component-base/version/verflag" + "k8s.io/component-base/zpages/flagz" "k8s.io/klog/v2" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" @@ -161,6 +162,9 @@ cluster's shared state through which all other components interact.`, fs := cmd.Flags() namedFlagSets := s.Flags() + s.Flagz = flagz.NamedFlagSetsReader{ + FlagSets: namedFlagSets, + } verflag.AddFlags(namedFlagSets.FlagSet("global")) globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name(), logs.SkipLoggingConfigurationFlags()) options.AddCustomGlobalFlags(namedFlagSets.FlagSet("generic")) diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go index 5adb98f63471..b2eb2f1e27e8 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go @@ -227,6 +227,8 @@ var Annotations = map[string]string{ "[sig-api-machinery] OpenAPIV3 should round trip OpenAPI V3 for all built-in group versions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-api-machinery] OrderedNamespaceDeletion namespace deletion should delete pod first [Feature:OrderedNamespaceDeletion] [FeatureGate:OrderedNamespaceDeletion] [Beta]": " [Disabled:Alpha] [Suite:k8s]", + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with 
same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -749,7 +751,7 @@ var Annotations = map[string]string{ "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]", - "[sig-cli] Kubectl Port forwarding Shutdown client connection while the remote stream is writing data to the port-forward connection port-forward should keep working after detect broken connection": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-cli] Kubectl Port forwarding Shutdown client connection while the remote stream is writing data to the port-forward connection port-forward should keep working after detect broken connection": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -845,6 +847,8 @@ var Annotations = map[string]string{ "[sig-cli] Kubectl client Simple pod Kubectl run running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-cli] Kubectl client Simple pod [Slow] should support exec idle connections": " [Suite:k8s]", + "[sig-cli] Kubectl client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -1757,7 +1761,7 @@ var Annotations = map[string]string{ "[sig-node] PodOSRejection [NodeConformance] Kubelet [LinuxOnly] should reject pod when the node OS doesn't match pod's OS": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-node] PodRejectionStatus Kubelet should reject pod when the node didn't have enough resource": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-node] PodRejectionStatus Kubelet should reject pod when the node didn't have enough resource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-node] PodTemplates should delete a collection of pod templates [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -2223,17 +2227,17 @@ var Annotations = map[string]string{ "[sig-storage] CSI Mock volume expansion CSI online volume expansion with secret should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should be possible for node-only expanded volumes with final error": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should be possible for node-only expanded volumes with final error": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should be possible for node-only expanded volumes with infeasible error": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should be possible for node-only expanded volumes with infeasible error": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with infeasible error": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with infeasible error": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from File to None": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2319,7 +2323,7 @@ var Annotations = map[string]string{ "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:SpecialConfig] [Suite:k8s]", "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2913,7 +2917,7 @@ var Annotations = map[string]string{ "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: (delete policy)] volumegroupsnapshottable 
[Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Skipped:gce] [Suite:k8s]", + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3737,7 +3741,7 @@ var Annotations = map[string]string{ "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathFile": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4267,7 +4271,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4801,7 +4805,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5331,7 +5335,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5865,7 +5869,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6399,7 +6403,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6933,7 +6937,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:Broken] [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: (delete policy)] 
volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:Broken] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]", @@ -7463,7 +7467,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7993,7 +7997,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8527,7 +8531,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", @@ -9061,7 +9065,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9595,7 +9599,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10129,7 +10133,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10663,7 +10667,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: 
(delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Skipped:gce] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -11197,7 +11201,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -11731,7 +11735,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -12265,7 +12269,7 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -12805,6 +12809,600 @@ var Annotations = map[string]string{ "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: (delete policy)] volumegroupsnapshottable [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots for multiple volumes in a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes 
[Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] pvc-deletion-performance should delete volumes at scale within performance constraints [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " 
[Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial 
fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volumeIO should write 
files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", 
+ + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + 
+ "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] 
[OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: 
csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic 
PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support 
restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + 
"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of 
many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral 
should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext4)] 
volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] 
[OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", 
+ + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: 
csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] 
OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " 
 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 	"[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
 	"[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go
index 42676c8ce51c..52339d3a87a9 100644
--- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go
+++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go
@@ -26,6 +26,7 @@ var (
 		`\[Feature:RelaxedDNSSearchValidation\]`,
 		`\[Feature:PodLogsQuerySplitStreams\]`,
 		`\[Feature:PodLifecycleSleepActionAllowZero\]`,
+		`\[Feature:OrderedNamespaceDeletion\]`, // disabled Beta
 	},
 	// tests for features that are not implemented in openshift
 	"[Disabled:Unimplemented]": {
@@ -66,6 +67,11 @@ var (
 		// host. Enabling the test would result in the bastion being created for every parallel test execution.
 		// Given that we have existing oc and WMCO tests that cover this functionality, we can safely disable it.
 		`\[Feature:NodeLogQuery\]`,
+
+		// volumegroupsnapshot in csi-hostpath tests requires changes in the test yaml files,
+		// which are done by a script upstream. In OCP, we added a separate driver, csi-hostpath-groupsnapshot,
+		// which will not be skipped by any rule here.
+		`\[Driver: csi-hostpath\].*\[Feature:volumegroupsnapshot\]`,
 	},
 	// tests that are known broken and need to be fixed upstream or in openshift
 	// always add an issue here
@@ -169,22 +175,8 @@
 	// https://issues.redhat.com/browse/OCPBUGS-17194
 	`\[sig-node\] ImageCredentialProvider \[Feature:KubeletCredentialProviders\] should be able to create pod with image credentials fetched from external credential provider`,
-	// https://issues.redhat.com/browse/OCPBUGS-45214
-	// Even though this feature is not GA in k/k, it will be GA in OCP 4.19, so we should fix it and unskip this test
-	`\[Feature:volumegroupsnapshot\]`,
-
 	// https://issues.redhat.com/browse/OCPBUGS-45273
 	`\[sig-network\] Services should implement NodePort and HealthCheckNodePort correctly when ExternalTrafficPolicy changes`,
-
-	// https://issues.redhat.com/browse/OCPBUGS-45273
-	`\[sig-cli\] Kubectl Port forwarding Shutdown client connection while the remote stream is writing data to the port-forward connection port-forward should keep working after detect broken connection`,
-
-	// https://issues.redhat.com/browse/OCPBUGS-45274
-	// https://github.com/kubernetes/kubernetes/issues/129056
-	`\[sig-node\] PodRejectionStatus Kubelet should reject pod when the node didn't have enough resource`,
-
-	// https://issues.redhat.com/browse/OCPBUGS-45359
-	`\[Feature:RecoverVolumeExpansionFailure\]`,
 	},
 	// tests that may work, but we don't support them
 	"[Disabled:Unsupported]": {
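A note on why the replacement rule above is narrower than the removed `\[Feature:volumegroupsnapshot\]` skip: the annotate generator applies each label to every test name that matches one of its regexps, and the trailing `\]` in `\[Driver: csi-hostpath\]` anchors the match to the upstream driver name, so the OCP-only csi-hostpath-groupsnapshot tests fall through and keep running. A minimal standalone sketch of that matching behavior (the test names below are abbreviated stand-ins, not real suite entries):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The new skip rule from rules.go above.
	rule := regexp.MustCompile(`\[Driver: csi-hostpath\].*\[Feature:volumegroupsnapshot\]`)

	upstream := "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Feature:volumegroupsnapshot] group snapshot test"
	ocpOnly := "[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] group snapshot test"

	fmt.Println(rule.MatchString(upstream)) // true: still skipped
	fmt.Println(rule.MatchString(ocpOnly))  // false: "-groupsnapshot" breaks the literal "]" match, so the test runs
}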
configv1.Resource("nodes"): true, - }, - map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ - configv1.GroupVersion.WithKind("Node"): configNodeV1{}, - }) - }) -} - -func toConfigNodeV1(uncastObj runtime.Object) (*configv1.Node, field.ErrorList) { - if uncastObj == nil { - return nil, nil - } - - allErrs := field.ErrorList{} - - obj, ok := uncastObj.(*configv1.Node) - if !ok { - return nil, append(allErrs, - field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Node"}), - field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) - } - - return obj, nil -} - -type configNodeV1 struct{} - -func validateConfigNodeForExtremeLatencyProfile(obj, oldObj *configv1.Node) *field.Error { - fromProfile := oldObj.Spec.WorkerLatencyProfile - toProfile := obj.Spec.WorkerLatencyProfile - - for _, rejectionScenario := range rejectionScenarios { - if fromProfile == rejectionScenario.fromProfile && toProfile == rejectionScenario.toProfile { - return field.Invalid(field.NewPath("spec", "workerLatencyProfile"), obj.Spec.WorkerLatencyProfile, - fmt.Sprintf( - "cannot update worker latency profile from %q to %q as extreme profile transition is unsupported, please select any other profile with supported transition such as %q", - oldObj.Spec.WorkerLatencyProfile, - obj.Spec.WorkerLatencyProfile, - configv1.MediumUpdateAverageReaction, - ), - ) - } - } - return nil -} - -func (configNodeV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { - obj, allErrs := toConfigNodeV1(uncastObj) - if len(allErrs) > 0 { - return allErrs - } - - allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) - - return allErrs -} - -func (configNodeV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { - obj, allErrs := toConfigNodeV1(uncastObj) - if len(allErrs) > 0 { - return allErrs - } - oldObj, allErrs := toConfigNodeV1(uncastOldObj) - if len(allErrs) > 0 { - return allErrs - } - - allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) - if err := validateConfigNodeForExtremeLatencyProfile(obj, oldObj); err != nil { - allErrs = append(allErrs, err) - } - - return allErrs -} - -func (configNodeV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { - obj, errs := toConfigNodeV1(uncastObj) - if len(errs) > 0 { - return errs - } - oldObj, errs := toConfigNodeV1(uncastOldObj) - if len(errs) > 0 { - return errs - } - - // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. - errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
- - return errs -} diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node/validate_node_config.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node/validate_node_config.go new file mode 100644 index 000000000000..355317a362fc --- /dev/null +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/node/validate_node_config.go @@ -0,0 +1,218 @@ +package node + +import ( + "context" + "errors" + "fmt" + "io" + + configv1 "github.com/openshift/api/config/v1" + nodelib "github.com/openshift/library-go/pkg/apiserver/node" + + openshiftfeatures "github.com/openshift/api/features" + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/component-base/featuregate" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +var rejectionScenarios = []struct { + fromProfile configv1.WorkerLatencyProfileType + toProfile configv1.WorkerLatencyProfileType +}{ + {fromProfile: "", toProfile: configv1.LowUpdateSlowReaction}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: ""}, + {fromProfile: configv1.DefaultUpdateDefaultReaction, toProfile: configv1.LowUpdateSlowReaction}, + {fromProfile: configv1.LowUpdateSlowReaction, toProfile: configv1.DefaultUpdateDefaultReaction}, +} + +const PluginName = "config.openshift.io/ValidateConfigNodeV1" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + ret := &configNodeV1Wrapper{} + delegate, err := customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("nodes"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Node"): &configNodeV1{ + nodeListerFn: ret.getNodeLister, + waitForNodeInformerSyncedFn: ret.waitForNodeInformerSyncedFn, + minimumKubeletVersionEnabled: feature.DefaultFeatureGate.Enabled(featuregate.Feature(openshiftfeatures.FeatureGateMinimumKubeletVersion)), + }, + }) + if err != nil { + return nil, err + } + ret.delegate = delegate + return ret, nil + }) +} + +func toConfigNodeV1(uncastObj runtime.Object) (*configv1.Node, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Node) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Node"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type configNodeV1 struct { + nodeListerFn func() corev1listers.NodeLister + waitForNodeInformerSyncedFn func() bool + minimumKubeletVersionEnabled bool +} + +func validateConfigNodeForExtremeLatencyProfile(obj, oldObj *configv1.Node) *field.Error { + fromProfile := oldObj.Spec.WorkerLatencyProfile + toProfile := obj.Spec.WorkerLatencyProfile + + for _, rejectionScenario := range rejectionScenarios { + if fromProfile == rejectionScenario.fromProfile && toProfile == rejectionScenario.toProfile { + 
return field.Invalid(field.NewPath("spec", "workerLatencyProfile"), obj.Spec.WorkerLatencyProfile, + fmt.Sprintf( + "cannot update worker latency profile from %q to %q as extreme profile transition is unsupported, please select any other profile with supported transition such as %q", + oldObj.Spec.WorkerLatencyProfile, + obj.Spec.WorkerLatencyProfile, + configv1.MediumUpdateAverageReaction, + ), + ) + } + } + return nil +} + +func (c *configNodeV1) ValidateCreate(_ context.Context, uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toConfigNodeV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + if err := c.validateMinimumKubeletVersion(obj); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} + +func (c *configNodeV1) ValidateUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toConfigNodeV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toConfigNodeV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + if err := validateConfigNodeForExtremeLatencyProfile(obj, oldObj); err != nil { + allErrs = append(allErrs, err) + } + if err := c.validateMinimumKubeletVersion(obj); err != nil { + allErrs = append(allErrs, err) + } + + return allErrs +} +func (c *configNodeV1) validateMinimumKubeletVersion(obj *configv1.Node) *field.Error { + if !c.minimumKubeletVersionEnabled { + return nil + } + fieldPath := field.NewPath("spec", "minimumKubeletVersion") + if !c.waitForNodeInformerSyncedFn() { + return field.InternalError(fieldPath, fmt.Errorf("caches not synchronized, cannot validate minimumKubeletVersion")) + } + + nodes, err := c.nodeListerFn().List(labels.Everything()) + if err != nil { + return field.NotFound(fieldPath, fmt.Sprintf("Getting nodes to compare minimum version %v", err.Error())) + } + + if err := nodelib.ValidateMinimumKubeletVersion(nodes, obj.Spec.MinimumKubeletVersion); err != nil { + if errors.Is(err, nodelib.ErrKubeletOutdated) { + return field.Forbidden(fieldPath, err.Error()) + } + return field.Invalid(fieldPath, obj.Spec.MinimumKubeletVersion, err.Error()) + } + return nil +} + +func (*configNodeV1) ValidateStatusUpdate(_ context.Context, uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConfigNodeV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConfigNodeV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} + +type configNodeV1Wrapper struct { + // handler is only used to know if the plugin is ready to process requests. 
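+	// (admission.Handler supplies SetReadyFunc and WaitForReady; SetExternalKubeInformerFactory
+	// below wires it to the node informer's HasSynced, and waitForNodeInformerSyncedFn reads it back.)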
+	handler admission.Handler
+
+	nodeLister corev1listers.NodeLister
+	delegate   admission.ValidationInterface
+}
+
+var (
+	_ = initializer.WantsExternalKubeInformerFactory(&configNodeV1Wrapper{})
+	_ = admission.ValidationInterface(&configNodeV1Wrapper{})
+)
+
+func (c *configNodeV1Wrapper) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) {
+	nodeInformer := kubeInformers.Core().V1().Nodes()
+	c.nodeLister = nodeInformer.Lister()
+	c.handler.SetReadyFunc(nodeInformer.Informer().HasSynced)
+}
+
+func (c *configNodeV1Wrapper) ValidateInitialization() error {
+	if c.nodeLister == nil {
+		return fmt.Errorf("%s needs a nodes lister", PluginName)
+	}
+
+	return nil
+}
+
+func (c *configNodeV1Wrapper) getNodeLister() corev1listers.NodeLister {
+	return c.nodeLister
+}
+
+func (c *configNodeV1Wrapper) waitForNodeInformerSyncedFn() bool {
+	return c.handler.WaitForReady()
+}
+
+func (c *configNodeV1Wrapper) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) {
+	return c.delegate.Validate(ctx, a, o)
+}
+
+func (c *configNodeV1Wrapper) Handles(operation admission.Operation) bool {
+	return c.delegate.Handles(operation)
+}
diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/authorization/minimumkubeletversion/minimum_kubelet_version.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/authorization/minimumkubeletversion/minimum_kubelet_version.go
new file mode 100644
index 000000000000..f28ff0e72877
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/authorization/minimumkubeletversion/minimum_kubelet_version.go
@@ -0,0 +1,90 @@
+package minimumkubeletversion
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/blang/semver/v4"
+	openshiftfeatures "github.com/openshift/api/features"
+	nodelib "github.com/openshift/library-go/pkg/apiserver/node"
+	authorizationv1 "k8s.io/api/authorization/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+	"k8s.io/apiserver/pkg/util/feature"
+	v1listers "k8s.io/client-go/listers/core/v1"
+	cache "k8s.io/client-go/tools/cache"
+	"k8s.io/component-base/featuregate"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
+)
+
+type minimumKubeletVersionAuth struct {
+	nodeIdentifier          nodeidentifier.NodeIdentifier
+	nodeLister              v1listers.NodeLister
+	minVersion              *semver.Version
+	hasNodeInformerSyncedFn func() bool // factored for unit tests
+}
+
+// NewMinimumKubeletVersion creates a new minimumKubeletVersionAuth object, which is an authorizer
+// that checks whether nodes are new enough to be authorized.
+func NewMinimumKubeletVersion(minVersion *semver.Version,
+	nodeIdentifier nodeidentifier.NodeIdentifier,
+	nodeInformer cache.SharedIndexInformer,
+	nodeLister v1listers.NodeLister,
+) *minimumKubeletVersionAuth {
+	if !feature.DefaultFeatureGate.Enabled(featuregate.Feature(openshiftfeatures.FeatureGateMinimumKubeletVersion)) {
+		minVersion = nil
+	}
+
+	return &minimumKubeletVersionAuth{
+		nodeIdentifier:          nodeIdentifier,
+		nodeLister:              nodeLister,
+		hasNodeInformerSyncedFn: nodeInformer.HasSynced,
+		minVersion:              minVersion,
+	}
+}
+
+func (m *minimumKubeletVersionAuth) Authorize(ctx context.Context, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
+	if m.minVersion == nil {
+		return authorizer.DecisionNoOpinion, "", nil
+	}
+
+	// Short-circuit if the request is a "subjectaccessreviews", or a "get" or "update" on the node object.
+	// Regardless of kubelet version, a node should be allowed to do these things.
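+	// (Returning DecisionNoOpinion rather than DecisionAllow defers to the rest of the
+	// authorizer chain, so RBAC and the node authorizer still get to rule on the request.)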
+ if attrs.IsResourceRequest() { + requestResource := schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()} + switch requestResource { + case api.Resource("nodes"): + if v := attrs.GetVerb(); v == "get" || v == "update" { + return authorizer.DecisionNoOpinion, "", nil + } + case authorizationv1.Resource("subjectaccessreviews"): + return authorizer.DecisionNoOpinion, "", nil + } + } + + nodeName, isNode := m.nodeIdentifier.NodeIdentity(attrs.GetUser()) + if !isNode { + // ignore requests from non-nodes + return authorizer.DecisionNoOpinion, "", nil + } + + if !m.hasNodeInformerSyncedFn() { + return authorizer.DecisionDeny, "", fmt.Errorf("node informer not synced, cannot check if node %s is new enough", nodeName) + } + + node, err := m.nodeLister.Get(nodeName) + if err != nil { + return authorizer.DecisionDeny, "", err + } + + if err := nodelib.IsNodeTooOld(node, m.minVersion); err != nil { + if errors.Is(err, nodelib.ErrKubeletOutdated) { + return authorizer.DecisionDeny, err.Error(), nil + } + return authorizer.DecisionDeny, "", err + } + + return authorizer.DecisionNoOpinion, "", nil +} diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/enablement/intialization.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/enablement/intialization.go index 52794bec4b64..3aff4daf24c9 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/enablement/intialization.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/enablement/intialization.go @@ -87,6 +87,11 @@ func ForceGlobalInitializationForOpenShift() { // we need to have the authorization chain place something before system:masters // SkipSystemMastersAuthorizer disable implicitly added system/master authz, and turn it into another authz mode "SystemMasters", to be added via authorization-mode authorizer.SkipSystemMastersAuthorizer() + + // Set the minimum kubelet version + // If the OpenshiftConfig wasn't configured by this point, it's a programming error, + // and this should panic. 
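+	// (SetMinimumKubeletVersion panics if it is called twice or if the version does not
+	// parse as semver, and GetMinimumKubeletVersion panics when read before it is set;
+	// both are defined in pkg/kubeapiserver/authorizer/patch.go below.)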
+ authorizer.SetMinimumKubeletVersion(OpenshiftConfig().MinimumKubeletVersion) } var SCCAdmissionPlugin = sccadmission.NewConstraint() diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go index 621438091b2f..e03ca7fc0941 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go @@ -415,7 +415,7 @@ func GetValidationOptionsFromPodSpecAndMeta(podSpec, oldPodSpec *api.PodSpec, po } } - opts.AllowPodLifecycleSleepActionZeroValue = opts.AllowPodLifecycleSleepActionZeroValue || podLifecycleSleepActionZeroValueInUse(podSpec) + opts.AllowPodLifecycleSleepActionZeroValue = opts.AllowPodLifecycleSleepActionZeroValue || podLifecycleSleepActionZeroValueInUse(oldPodSpec) // If oldPod has resize policy set on the restartable init container, we must allow it opts.AllowSidecarResizePolicy = hasRestartableInitContainerResizePolicy(oldPodSpec) } @@ -772,7 +772,7 @@ func podLifecycleSleepActionZeroValueInUse(podSpec *api.PodSpec) bool { inUse = true return false } - if c.Lifecycle.PostStart != nil && c.Lifecycle.PostStart.Sleep != nil && c.Lifecycle.PreStop.Sleep.Seconds == 0 { + if c.Lifecycle.PostStart != nil && c.Lifecycle.PostStart.Sleep != nil && c.Lifecycle.PostStart.Sleep.Seconds == 0 { inUse = true return false } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/resource/types.go b/vendor/k8s.io/kubernetes/pkg/apis/resource/types.go index fca9bc6a6b03..c603811690f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/resource/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/resource/types.go @@ -141,6 +141,10 @@ type ResourceSliceSpec struct { Devices []Device } +// DriverNameMaxLength is the maximum valid length of a driver name in the +// ResourceSliceSpec and other places. It's the same as for CSI driver names. +const DriverNameMaxLength = 63 + // ResourcePool describes the pool that ResourceSlices belong to. type ResourcePool struct { // Name is used to identify the pool. For node-local devices, this diff --git a/vendor/k8s.io/kubernetes/pkg/controlplane/apiserver/options/options.go b/vendor/k8s.io/kubernetes/pkg/controlplane/apiserver/options/options.go index 2266be0cbed3..28741c84c185 100644 --- a/vendor/k8s.io/kubernetes/pkg/controlplane/apiserver/options/options.go +++ b/vendor/k8s.io/kubernetes/pkg/controlplane/apiserver/options/options.go @@ -203,7 +203,7 @@ func (s *Options) AddFlags(fss *cliflag.NamedFlagSets) { "Path to socket where a external JWT signer is listening. This flag is mutually exclusive with --service-account-signing-key-file and --service-account-key-file. 
Requires enabling feature gate (ExternalServiceAccountTokenSigner)") } -func (o *Options) Complete(ctx context.Context, fss cliflag.NamedFlagSets, alternateDNS []string, alternateIPs []net.IP) (CompletedOptions, error) { +func (o *Options) Complete(ctx context.Context, alternateDNS []string, alternateIPs []net.IP) (CompletedOptions, error) { if o == nil { return CompletedOptions{completedOptions: &completedOptions{}}, nil } @@ -259,8 +259,6 @@ func (o *Options) Complete(ctx context.Context, fss cliflag.NamedFlagSets, alter } } - completed.Flagz = flagz.NamedFlagSetsReader{FlagSets: fss} - return CompletedOptions{ completedOptions: &completed, }, nil diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index d0645862102c..bc8d3b3ab279 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -456,6 +456,12 @@ const ( // Permits kubelet to run with swap enabled. NodeSwap featuregate.Feature = "NodeSwap" + // owner: @cici37 + // kep: https://kep.k8s.io/5080 + // + // Enables ordered namespace deletion. + OrderedNamespaceDeletion featuregate.Feature = "OrderedNamespaceDeletion" + // owner: @mortent, @atiratree, @ravig // kep: http://kep.k8s.io/3018 // diff --git a/vendor/k8s.io/kubernetes/pkg/features/openshift_features.go b/vendor/k8s.io/kubernetes/pkg/features/openshift_features.go index 6325606ee886..3ba55fdac903 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/openshift_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/openshift_features.go @@ -5,6 +5,7 @@ import ( ) var RouteExternalCertificate featuregate.Feature = "RouteExternalCertificate" +var MinimumKubeletVersion featuregate.Feature = "MinimumKubeletVersion" // registerOpenshiftFeatures injects openshift-specific feature gates func registerOpenshiftFeatures() { @@ -12,4 +13,8 @@ func registerOpenshiftFeatures() { Default: false, PreRelease: featuregate.Alpha, } + defaultKubernetesFeatureGates[MinimumKubeletVersion] = featuregate.FeatureSpec{ + Default: false, + PreRelease: featuregate.Alpha, + } } diff --git a/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go index 9079b9b03e9e..348c04df2ba0 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go @@ -311,7 +311,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate }, genericfeatures.ResilientWatchCacheInitialization: { - {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, }, genericfeatures.RetryGenerateName: { @@ -556,6 +556,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, }, + OrderedNamespaceDeletion: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + }, + PDBUnhealthyPodEvictionPolicy: { {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/patch.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/patch.go index bc892601ebe6..830982e5b714 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/patch.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/patch.go
@@ -2,7 +2,8 @@ package modes
 
 var ModeScope = "Scope"
 var ModeSystemMasters = "SystemMasters"
+var ModeMinimumKubeletVersion = "MinimumKubeletVersion"
 
 func init() {
-	AuthorizationModeChoices = append(AuthorizationModeChoices, ModeScope, ModeSystemMasters)
+	AuthorizationModeChoices = append(AuthorizationModeChoices, ModeScope, ModeSystemMasters, ModeMinimumKubeletVersion)
 }
diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/patch.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/patch.go
index 8a095efcf98d..7d44be996488 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/patch.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/patch.go
@@ -1,8 +1,54 @@
 package authorizer
 
+import (
+	"sync"
+
+	"github.com/blang/semver/v4"
+)
+
 var skipSystemMastersAuthorizer = false
 
 // SkipSystemMastersAuthorizer disable implicitly added system/master authz, and turn it into another authz mode "SystemMasters", to be added via authorization-mode
 func SkipSystemMastersAuthorizer() {
 	skipSystemMastersAuthorizer = true
 }
+
+var (
+	minimumKubeletVersion *semver.Version
+	versionLock           sync.Mutex
+	versionSet            bool
+)
+
+// GetMinimumKubeletVersion retrieves the set global minimum kubelet version in a safe way.
+// It ensures the value has been set before it is retrieved.
+// The global value should only be gotten through this function.
+// It is valid for the version to be unset. It will be treated the same as explicitly setting version to "".
+// This function (and the corresponding functions/variables) are added to avoid an import cycle between the
+// ./openshift-kube-apiserver/enablement and ./pkg/kubeapiserver/authorizer packages
+func GetMinimumKubeletVersion() *semver.Version {
+	versionLock.Lock()
+	defer versionLock.Unlock()
+	if !versionSet {
+		panic("coding error: MinimumKubeletVersion not set yet")
+	}
+	return minimumKubeletVersion
+}
+
+// SetMinimumKubeletVersion sets the global minimum kubelet version in a safe way.
+// It ensures it is only set once, and that the passed version is valid.
+// It will panic on any error.
+// The global value should only be set through this function.
+// Passing an empty string for version is valid, and means there is no minimum version.
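+// A typical sequence (sketch): openshift-kube-apiserver's enablement code calls
+// SetMinimumKubeletVersion(OpenshiftConfig().MinimumKubeletVersion) once during startup,
+// and the "MinimumKubeletVersion" authorization mode later reads the parsed value back
+// via GetMinimumKubeletVersion when building the authorizer chain.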
+func SetMinimumKubeletVersion(version string) {
+	versionLock.Lock()
+	defer versionLock.Unlock()
+	if versionSet {
+		panic("coding error: MinimumKubeletVersion already set")
+	}
+	versionSet = true
+	if len(version) == 0 {
+		return
+	}
+	v := semver.MustParse(version)
+	minimumKubeletVersion = &v
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/reload.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/reload.go
index 381765d81c36..3de2fff36a8d 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/reload.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/reload.go
@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/openshift-kube-apiserver/authorization/browsersafe"
+	"k8s.io/kubernetes/openshift-kube-apiserver/authorization/minimumkubeletversion"
 
 	"k8s.io/apimachinery/pkg/util/sets"
 	authzconfig "k8s.io/apiserver/pkg/apis/apiserver"
@@ -43,6 +44,7 @@ import (
 	webhookmetrics "k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/auth/authorizer/abac"
+	"k8s.io/kubernetes/pkg/auth/nodeidentifier"
 	"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
 	"k8s.io/kubernetes/pkg/util/filesystem"
 	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
@@ -175,6 +177,15 @@ func (r *reloadableAuthorizerResolver) newForConfig(authzConfig *authzconfig.Aut
 		case authzconfig.AuthorizerType(modes.ModeSystemMasters):
 			// no browsersafeauthorizer here becase that rewrites the resources. This authorizer matches no matter which resource matches.
 			authorizers = append(authorizers, authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup))
+		case authzconfig.AuthorizerType(modes.ModeMinimumKubeletVersion):
+			// Add the MinimumKubeletVersion authorizer, to block a node from being able to access most resources if it's not new enough.
+			// We must do so here instead of in pkg/apiserver because it relies on a node informer, which is not present in generic control planes.
+			authorizers = append(authorizers, minimumkubeletversion.NewMinimumKubeletVersion(
+				GetMinimumKubeletVersion(),
+				nodeidentifier.NewDefaultNodeIdentifier(),
+				r.initialConfig.VersionedInformerFactory.Core().V1().Nodes().Informer(),
+				r.initialConfig.VersionedInformerFactory.Core().V1().Nodes().Lister(),
+			))
 		default:
 			return nil, nil, fmt.Errorf("unknown authorization mode %s specified", configuredAuthorizer.Type)
 		}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
index 15937cb839e9..f20ad4759fad 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go
@@ -1287,12 +1287,6 @@ type Kubelet struct {
 	// status to master. It is only used when node lease feature is enabled.
 	nodeStatusReportFrequency time.Duration
 
-	// delayAfterNodeStatusChange is the one-time random duration that we add to the next node status report interval
-	// every time when there's an actual node status change. But all future node status update that is not caused by
-	// real status change will stick with nodeStatusReportFrequency. The random duration is a uniform distribution over
-	// [-0.5*nodeStatusReportFrequency, 0.5*nodeStatusReportFrequency]
-	delayAfterNodeStatusChange time.Duration
-
 	// lastStatusReportTime is the time when node status was last reported.
lastStatusReportTime time.Time diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go index 7b34b0a48c6e..ebf549abe838 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go @@ -19,7 +19,6 @@ package kubelet import ( "context" "fmt" - "math/rand" "net" goruntime "runtime" "sort" @@ -629,21 +628,13 @@ func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error } node, changed := kl.updateNode(ctx, originalNode) - // no need to update the status yet - if !changed && !kl.isUpdateStatusPeriodExperid() { + shouldPatchNodeStatus := changed || kl.clock.Since(kl.lastStatusReportTime) >= kl.nodeStatusReportFrequency + + if !shouldPatchNodeStatus { kl.markVolumesFromNode(node) return nil } - // We need to update the node status, if this is caused by a node change we want to calculate a new - // random delay so we avoid all the nodes to reach the apiserver at the same time. If the update is not related - // to a node change, because we run over the period, we reset the random delay so the node keeps updating - // its status at the same cadence - if changed { - kl.delayAfterNodeStatusChange = kl.calculateDelay() - } else { - kl.delayAfterNodeStatusChange = 0 - } updatedNode, err := kl.patchNodeStatus(originalNode, node) if err == nil { kl.markVolumesFromNode(updatedNode) @@ -651,20 +642,6 @@ func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error return err } -func (kl *Kubelet) isUpdateStatusPeriodExperid() bool { - if kl.lastStatusReportTime.IsZero() { - return false - } - if kl.clock.Since(kl.lastStatusReportTime) >= kl.nodeStatusReportFrequency+kl.delayAfterNodeStatusChange { - return true - } - return false -} - -func (kl *Kubelet) calculateDelay() time.Duration { - return time.Duration(float64(kl.nodeStatusReportFrequency) * (-0.5 + rand.Float64())) -} - // updateNode creates a copy of originalNode and runs update logic on it. // It returns the updated node object and a bool indicating if anything has been changed. 
func (kl *Kubelet) updateNode(ctx context.Context, originalNode *v1.Node) (*v1.Node, bool) { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index 6fa9e05bab40..a788d97d4021 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -95,7 +95,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(), rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(), rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(), - rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) @@ -146,7 +146,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpoint-controller"}, Rules: []rbacv1.PolicyRule{ rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(), - rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(), eventsRule(), }, @@ -159,7 +159,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) // The controller needs to be able to set a service's finalizers to be able to create an EndpointSlice // resource that is owned by the service and sets blockOwnerDeletion=true in its ownerRef. rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/finalizers").RuleOrDie(), - rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), rbacv1helpers.NewRule("create").Groups(discoveryGroup).Resources("endpointslices/restricted").RuleOrDie(), eventsRule(), }, @@ -176,7 +176,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) // resource that is owned by the endpoint and sets blockOwnerDeletion=true in its ownerRef. 
// see https://github.com/openshift/kubernetes/blob/8691466059314c3f7d6dcffcbb76d14596ca716c/pkg/controller/endpointslicemirroring/utils.go#L87-L88 rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints/finalizers").RuleOrDie(), - rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), rbacv1helpers.NewRule("create").Groups(discoveryGroup).Resources("endpointslices/restricted").RuleOrDie(), eventsRule(), }, @@ -233,11 +233,11 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) rbacv1helpers.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), rbacv1helpers.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(), rbacv1helpers.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(), - rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), // allow listing resource, custom, and external metrics - rbacv1helpers.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(), - rbacv1helpers.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(), - rbacv1helpers.NewRule("get", "list").Groups(externalMetricsGroup).Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups(resMetricsGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(customMetricsGroup).Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(externalMetricsGroup).Resources("*").RuleOrDie(), eventsRule(), }, }) @@ -263,11 +263,11 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) role := rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"}, Rules: []rbacv1.PolicyRule{ - rbacv1helpers.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), // used for pod deletion rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), - rbacv1helpers.NewRule("list", "get", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "get", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, } @@ -297,7 +297,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"}, Rules: []rbacv1.PolicyRule{ rbacv1helpers.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), - rbacv1helpers.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), }, } @@ -516,7 +516,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) // need list to get current RV 
for any resource // need patch for SSA of any resource // need create because SSA of a deleted resource will be interpreted as a create request, these always fail with a conflict error because UID is set - rbacv1helpers.NewRule("list", "create", "patch").Groups("*").Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "create", "patch").Groups("*").Resources("*").RuleOrDie(), rbacv1helpers.NewRule("update").Groups(storageVersionMigrationGroup).Resources("storageversionmigrations/status").RuleOrDie(), }, }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go index 121f4be1cc30..7c3910af1f08 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "k8s.io/kubernetes/pkg/features" "strings" "sync" "time" @@ -475,3 +476,124 @@ func unstructuredToNamespace(obj *unstructured.Unstructured) (*v1.Namespace, err return ns, err } + +var _ = SIGDescribe("OrderedNamespaceDeletion", func() { + f := framework.NewDefaultFramework("namespacedeletion") + f.NamespacePodSecurityLevel = admissionapi.LevelBaseline + + f.It("namespace deletion should delete pod first", feature.OrderedNamespaceDeletion, framework.WithFeatureGate(features.OrderedNamespaceDeletion), func(ctx context.Context) { + ensurePodsAreRemovedFirstInOrderedNamespaceDeletion(ctx, f) + }) +}) + +func ensurePodsAreRemovedFirstInOrderedNamespaceDeletion(ctx context.Context, f *framework.Framework) { + ginkgo.By("Creating a test namespace") + namespaceName := "nsdeletetest" + namespace, err := f.CreateNamespace(ctx, namespaceName, nil) + framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName) + nsName := namespace.Name + + ginkgo.By("Waiting for a default service account to be provisioned in namespace") + err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, nsName) + framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", nsName) + + ginkgo.By("Creating a pod with finalizer in the namespace") + podName := "test-pod" + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Finalizers: []string{ + "e2e.example.com/finalizer", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "nginx", + Image: imageutils.GetPauseImageName(), + }, + }, + }, + } + pod, err = f.ClientSet.CoreV1().Pods(nsName).Create(ctx, pod, metav1.CreateOptions{}) + framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, nsName) + + ginkgo.By("Waiting for the pod to have running status") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) + + configMapName := "test-configmap" + ginkgo.By(fmt.Sprintf("Creating a configmap %q in namespace %q", configMapName, nsName)) + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: nsName, + }, + Data: map[string]string{ + "key": "value", + }, + } + _, err = f.ClientSet.CoreV1().ConfigMaps(nsName).Create(ctx, configMap, metav1.CreateOptions{}) + framework.ExpectNoError(err, "failed to create configmap %q in namespace %q", configMapName, nsName) + + ginkgo.By("Deleting the namespace") + err = f.ClientSet.CoreV1().Namespaces().Delete(ctx, nsName, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete namespace: %s", nsName) + // wait 10 seconds to allow 
the namespace controller to process + time.Sleep(10 * time.Second) + ginkgo.By("the pod should be deleted before processing deletion for other resources") + framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, 2*time.Second, 60*time.Second, true, + func(ctx context.Context) (bool, error) { + _, err = f.ClientSet.CoreV1().ConfigMaps(nsName).Get(ctx, configMapName, metav1.GetOptions{}) + framework.ExpectNoError(err, "configmap %q should still exist in namespace %q", configMapName, nsName) + // the pod should exist and has a deletionTimestamp set + pod, err = f.ClientSet.CoreV1().Pods(nsName).Get(ctx, pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get pod %q in namespace %q", pod.Name, nsName) + if pod.DeletionTimestamp == nil { + framework.Logf("Pod %q in namespace %q does not yet have a metadata.deletionTimestamp set, retrying...", pod.Name, nsName) + return false, nil + } + ns, err := f.ClientSet.CoreV1().Namespaces().Get(ctx, nsName, metav1.GetOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return false, fmt.Errorf("namespace %s was deleted unexpectedly", nsName) + } + ginkgo.By("Read namespace status") + nsResource := v1.SchemeGroupVersion.WithResource("namespaces") + unstruct, err := f.DynamicClient.Resource(nsResource).Get(ctx, ns.Name, metav1.GetOptions{}, "status") + framework.ExpectNoError(err, "failed to fetch NamespaceStatus %s", ns) + nsStatus, err := unstructuredToNamespace(unstruct) + framework.ExpectNoError(err, "Getting the status of the namespace %s", ns) + gomega.Expect(nsStatus.Status.Phase).To(gomega.Equal(v1.NamespaceTerminating), "The phase returned was %v", nsStatus.Status.Phase) + hasContextFailure := false + for _, cond := range nsStatus.Status.Conditions { + if cond.Type == v1.NamespaceDeletionContentFailure { + hasContextFailure = true + } + } + if !hasContextFailure { + framework.Logf("Namespace %q does not yet have a NamespaceDeletionContentFailure condition, retrying...", nsName) + return false, nil + } + return true, nil + })) + + ginkgo.By(fmt.Sprintf("Removing finalizer from pod %q in namespace %q", podName, nsName)) + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + pod, err = f.ClientSet.CoreV1().Pods(nsName).Get(ctx, podName, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get pod %q in namespace %q", pod.Name, nsName) + pod.Finalizers = []string{} + _, err = f.ClientSet.CoreV1().Pods(nsName).Update(ctx, pod, metav1.UpdateOptions{}) + return err + }) + framework.ExpectNoError(err, "failed to update pod %q and remove finalizer in namespace %q", podName, nsName) + + ginkgo.By("Waiting for the namespace to be removed.") + maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds + framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, 1*time.Second, time.Duration(maxWaitSeconds)*time.Second, true, + func(ctx context.Context) (bool, error) { + _, err = f.ClientSet.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return true, nil + } + return false, nil + })) +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/feature/feature.go b/vendor/k8s.io/kubernetes/test/e2e/feature/feature.go index 77a77ad7b916..6a34ecb77b13 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/feature/feature.go +++ b/vendor/k8s.io/kubernetes/test/e2e/feature/feature.go @@ -270,6 +270,10 @@ var ( // TODO: document the feature (owning SIG, when to use this feature for a test) NodeOutOfServiceVolumeDetach = 
framework.WithFeature(framework.ValidFeatures.Add("NodeOutOfServiceVolumeDetach")) + // Owner: sig-api-machinery + // Marks tests that enforce ordered namespace deletion. + OrderedNamespaceDeletion = framework.WithFeature(framework.ValidFeatures.Add("OrderedNamespaceDeletion")) + // Owner: sig-network // Marks a single test that tests cluster DNS performance with many services. PerformanceDNS = framework.WithFeature(framework.ValidFeatures.Add("PerformanceDNS")) diff --git a/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go b/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go index 5dae2b8f4e77..a05e33b3642b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go +++ b/vendor/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go @@ -500,6 +500,16 @@ var _ = SIGDescribe("Kubectl client", func() { } }) + // https://issues.k8s.io/128314 + f.It(f.WithSlow(), "should support exec idle connections", func(ctx context.Context) { + ginkgo.By("executing a command in the container") + + execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "/bin/sh", "-c", "sleep 320 && echo running in container") + if expected, got := "running in container", strings.TrimSpace(execOutput); expected != got { + framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expected, got) + } + }) + ginkgo.It("should support exec through kubectl proxy", func(ctx context.Context) { _ = getTestContextHost() diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/pod_admission.go b/vendor/k8s.io/kubernetes/test/e2e/node/pod_admission.go index bd53f33c88a1..1891c5252dbe 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/node/pod_admission.go +++ b/vendor/k8s.io/kubernetes/test/e2e/node/pod_admission.go @@ -21,6 +21,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/onsi/gomega/gstruct" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,14 +98,24 @@ var _ = SIGDescribe("PodRejectionStatus", func() { // This detects if there are any new fields in Status that were dropped by the pod rejection. // These new fields either should be kept by kubelet's admission or added explicitly in the list of fields that are having a different value or must be cleared. 
-		expectedStatus := pod.Status.DeepCopy()
-		expectedStatus.Phase = gotPod.Status.Phase
-		expectedStatus.Conditions = nil
-		expectedStatus.Message = gotPod.Status.Message
-		expectedStatus.Reason = gotPod.Status.Reason
-		expectedStatus.StartTime = gotPod.Status.StartTime
-		// expectedStatus.QOSClass keep it as is
-		gomega.Expect(gotPod.Status).To(gomega.Equal(*expectedStatus))
+		gomega.Expect(gotPod.Status).To(gstruct.MatchAllFields(gstruct.Fields{
+			"Phase":                      gstruct.Ignore(),
+			"Conditions":                 gstruct.Ignore(),
+			"Message":                    gstruct.Ignore(),
+			"Reason":                     gstruct.Ignore(),
+			"NominatedNodeName":          gstruct.Ignore(),
+			"HostIP":                     gstruct.Ignore(),
+			"HostIPs":                    gstruct.Ignore(),
+			"PodIP":                      gstruct.Ignore(),
+			"PodIPs":                     gstruct.Ignore(),
+			"StartTime":                  gstruct.Ignore(),
+			"InitContainerStatuses":      gstruct.Ignore(),
+			"ContainerStatuses":          gstruct.Ignore(),
+			"QOSClass":                   gomega.Equal(pod.Status.QOSClass), // QOSClass should be kept
+			"EphemeralContainerStatuses": gstruct.Ignore(),
+			"Resize":                     gstruct.Ignore(),
+			"ResourceClaimStatuses":      gstruct.Ignore(),
+		}))
 	})
 })
diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/openshift_group_snapshot_driver.go b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/openshift_group_snapshot_driver.go
new file mode 100644
index 000000000000..cdbbbf5484f4
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/test/e2e/storage/drivers/openshift_group_snapshot_driver.go
@@ -0,0 +1,285 @@
+package drivers
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
+	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
+	"k8s.io/kubernetes/test/e2e/storage/utils"
+)
+
+// Special test driver for volume group snapshots.
+//
+// Upstream uses a script to install csi-driver-hostpath with group snapshots enabled in its CSI sidecars.
+// We can't use that in OCP, so let's create a new test driver based on [Driver: csi-hostpath],
+// only with the group snapshots enabled.
+
+// The rest of the file is a copy of Kubernetes' HostPath test driver from test/e2e/storage/drivers/csi.go
+// Differences:
+// - the test driver name is: [Driver: csi-hostpath-groupsnapshot].
+// - enabled group snapshots in the external-snapshotter sidecar.
+// - still use "csi-hostpath" as PatchCSIOptions.OldDriverName, because it's the name of a directory that needs to be replaced in the driver yaml files.
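+// As a sanity check on the renaming (a hedged sketch, not part of the driver): the
+// openshift-hack skip rule quoted earlier requires a literal "]" immediately after
+// "csi-hostpath", so it matches the upstream driver's tests but not this copy's:
+//
+//	skip := regexp.MustCompile(`\[Driver: csi-hostpath\].*\[Feature:volumegroupsnapshot\]`)
+//	skip.MatchString("[Driver: csi-hostpath] ... [Feature:volumegroupsnapshot] ...")               // true: skipped
+//	skip.MatchString("[Driver: csi-hostpath-groupsnapshot] ... [Feature:volumegroupsnapshot] ...") // false: still runs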
+
+type groupSnapshotHostpathCSIDriver struct {
+	driverInfo       storageframework.DriverInfo
+	manifests        []string
+	volumeAttributes []map[string]string
+}
+
+func initGroupSnapshotHostpathCSIDriver(name string, capabilities map[storageframework.Capability]bool, volumeAttributes []map[string]string, manifests ...string) storageframework.TestDriver {
+	return &groupSnapshotHostpathCSIDriver{
+		driverInfo: storageframework.DriverInfo{
+			Name:        name,
+			MaxFileSize: storageframework.FileSizeMedium,
+			SupportedFsType: sets.NewString(
+				"", // Default fsType
+			),
+			SupportedSizeRange: e2evolume.SizeRange{
+				Min: "1Mi",
+			},
+			Capabilities: capabilities,
+			StressTestOptions: &storageframework.StressTestOptions{
+				NumPods:     10,
+				NumRestarts: 10,
+			},
+			VolumeSnapshotStressTestOptions: &storageframework.VolumeSnapshotStressTestOptions{
+				NumPods:      10,
+				NumSnapshots: 10,
+			},
+			PerformanceTestOptions: &storageframework.PerformanceTestOptions{
+				ProvisioningOptions: &storageframework.PerformanceTestProvisioningOptions{
+					VolumeSize: "1Mi",
+					Count:      300,
+					// Volume provisioning metrics are compared to a high baseline.
+					// Failure to pass would suggest a performance regression.
+					ExpectedMetrics: &storageframework.Metrics{
+						AvgLatency: 2 * time.Minute,
+						Throughput: 0.5,
+					},
+				},
+			},
+			TestTags: []interface{}{"[OCPFeatureGate:VolumeGroupSnapshot]"},
+		},
+		manifests:        manifests,
+		volumeAttributes: volumeAttributes,
+	}
+}
+
+var _ storageframework.TestDriver = &groupSnapshotHostpathCSIDriver{}
+var _ storageframework.DynamicPVTestDriver = &groupSnapshotHostpathCSIDriver{}
+var _ storageframework.SnapshottableTestDriver = &groupSnapshotHostpathCSIDriver{}
+var _ storageframework.EphemeralTestDriver = &groupSnapshotHostpathCSIDriver{}
+
+// InitGroupSnapshotHostpathCSIDriver returns a groupSnapshotHostpathCSIDriver that implements the TestDriver interface
+func InitGroupSnapshotHostpathCSIDriver() storageframework.TestDriver {
+	capabilities := map[storageframework.Capability]bool{
+		storageframework.CapPersistence:                    true,
+		storageframework.CapSnapshotDataSource:             true,
+		storageframework.CapMultiPODs:                      true,
+		storageframework.CapBlock:                          true,
+		storageframework.CapPVCDataSource:                  true,
+		storageframework.CapControllerExpansion:            true,
+		storageframework.CapOfflineExpansion:               true,
+		storageframework.CapOnlineExpansion:                true,
+		storageframework.CapSingleNodeVolume:               true,
+		storageframework.CapReadWriteOncePod:               true,
+		storageframework.CapMultiplePVsSameID:              true,
+		storageframework.CapFSResizeFromSourceNotSupported: true,
+		storageframework.CapVolumeGroupSnapshot:            true,
+
+		// This is needed for the
+		// testsuites/volumelimits.go `should support volume limits`
+		// test. --maxvolumespernode=10 gets
+		// added when patching the deployment.
+		storageframework.CapVolumeLimits: true,
+	}
+	// OCP specific code: a different driver name (csi-hostpath-groupsnapshot)
+	return initGroupSnapshotHostpathCSIDriver("csi-hostpath-groupsnapshot",
+		capabilities,
+		// Volume attributes don't matter, but we have to provide at least one map.
+ []map[string]string{ + {"foo": "bar"}, + }, + "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml", + "test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml", + "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml", + ) +} + +func (h *groupSnapshotHostpathCSIDriver) GetDriverInfo() *storageframework.DriverInfo { + return &h.driverInfo +} + +func (h *groupSnapshotHostpathCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { + if pattern.VolType == storageframework.CSIInlineVolume && len(h.volumeAttributes) == 0 { + e2eskipper.Skipf("%s has no volume attributes defined, doesn't support ephemeral inline volumes", h.driverInfo.Name) + } +} + +func (h *groupSnapshotHostpathCSIDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass { + provisioner := config.GetUniqueDriverName() + parameters := map[string]string{} + ns := config.Framework.Namespace.Name + + return storageframework.GetStorageClass(provisioner, parameters, nil, ns) +} + +func (h *groupSnapshotHostpathCSIDriver) GetVolume(config *storageframework.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) { + return h.volumeAttributes[volumeNumber%len(h.volumeAttributes)], false /* not shared */, false /* read-write */ +} + +func (h *groupSnapshotHostpathCSIDriver) GetCSIDriverName(config *storageframework.PerTestConfig) string { + return config.GetUniqueDriverName() +} + +func (h *groupSnapshotHostpathCSIDriver) GetSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { + snapshotter := config.GetUniqueDriverName() + ns := config.Framework.Namespace.Name + + return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) +} + +func (h *groupSnapshotHostpathCSIDriver) GetVolumeAttributesClass(_ context.Context, config *storageframework.PerTestConfig) *storagev1beta1.VolumeAttributesClass { + return storageframework.CopyVolumeAttributesClass(&storagev1beta1.VolumeAttributesClass{ + DriverName: config.GetUniqueDriverName(), + Parameters: map[string]string{ + hostpathCSIDriverMutableParameterName: hostpathCSIDriverMutableParameterValue, + }, + }, config.Framework.Namespace.Name, "e2e-vac-hostpath") +} +func (h *groupSnapshotHostpathCSIDriver) GetVolumeGroupSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured { + snapshotter := config.GetUniqueDriverName() + ns := config.Framework.Namespace.Name + + return utils.GenerateVolumeGroupSnapshotClassSpec(snapshotter, parameters, ns) +} + +func (h *groupSnapshotHostpathCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { + // Create secondary namespace which will be used for creating driver + driverNamespace := utils.CreateDriverNamespace(ctx, f) + driverns := driverNamespace.Name + testns := f.Namespace.Name + + ginkgo.By(fmt.Sprintf("deploying %s driver", 
h.driverInfo.Name)) + cancelLogging := utils.StartPodLogs(ctx, f, driverNamespace) + cs := f.ClientSet + + // The hostpath CSI driver only works when everything runs on the same node. + node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs) + framework.ExpectNoError(err) + config := &storageframework.PerTestConfig{ + Driver: h, + Prefix: "hostpath", + Framework: f, + ClientNodeSelection: e2epod.NodeSelection{Name: node.Name}, + DriverNamespace: driverNamespace, + } + + patches := []utils.PatchCSIOptions{} + + patches = append(patches, utils.PatchCSIOptions{ + OldDriverName: "csi-hostpath", // OCP: hardcode csi-hostpath here, it specifies directories in yaml files that need to be replaced with the unique driver name. + NewDriverName: config.GetUniqueDriverName(), + DriverContainerName: "hostpath", + DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName(), + // This is needed for the + // testsuites/volumelimits.go `should support volume limits` + // test. + "--maxvolumespernode=10", + // Enable volume lifecycle checks, to report failure if + // the volume is not unpublished / unstaged correctly. + "--check-volume-lifecycle=true", + }, + ProvisionerContainerName: "csi-provisioner", + SnapshotterContainerName: "csi-snapshotter", + NodeName: node.Name, + }) + + // VAC E2E HostPath patch + // Enables ModifyVolume support in the hostpath CSI driver, and adds an enabled parameter name + patches = append(patches, utils.PatchCSIOptions{ + DriverContainerName: "hostpath", + DriverContainerArguments: []string{"--enable-controller-modify-volume=true", "--accepted-mutable-parameter-names=e2eVacTest"}, + }) + + // VAC E2E FeatureGate patches + // TODO: These can be removed after the VolumeAttributesClass feature is default enabled + patches = append(patches, utils.PatchCSIOptions{ + DriverContainerName: "csi-provisioner", + DriverContainerArguments: []string{"--feature-gates=VolumeAttributesClass=true"}, + }) + patches = append(patches, utils.PatchCSIOptions{ + DriverContainerName: "csi-resizer", + DriverContainerArguments: []string{"--feature-gates=VolumeAttributesClass=true"}, + }) + + // OCP specific code: enable group snapshot + patches = append(patches, utils.PatchCSIOptions{ + DriverContainerName: "csi-snapshotter", + DriverContainerArguments: []string{"--feature-gates=CSIVolumeGroupSnapshot=true"}, + }) + + err = utils.CreateFromManifests(ctx, config.Framework, driverNamespace, func(item interface{}) error { + for _, o := range patches { + if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil { + return err + } + } + + // Remove csi-external-health-monitor-agent and + // csi-external-health-monitor-controller + // containers. The agent is obsolete. + // The controller is not needed for any of the + // tests and is causing too much overhead when + // running in a large cluster (see + // https://github.com/kubernetes/kubernetes/issues/102452#issuecomment-856991009). + switch item := item.(type) { + case *appsv1.StatefulSet: + var containers []v1.Container + for _, container := range item.Spec.Template.Spec.Containers { + switch container.Name { + case "csi-external-health-monitor-agent", "csi-external-health-monitor-controller": + // Remove these containers. + default: + // Keep the others. + containers = append(containers, container) + } + } + item.Spec.Template.Spec.Containers = containers + } + return nil + }, h.manifests...) 
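+	// utils.CreateFromManifests has now fed every decoded manifest object through the
+	// patch callback above before creating it; an error returned from any patch aborts
+	// the deployment and is reported through err below.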
+ + if err != nil { + framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err) + } + + cleanupFunc := generateDriverCleanupFunc( + f, + h.driverInfo.Name, + testns, + driverns, + cancelLogging) + ginkgo.DeferCleanup(cleanupFunc) + + return config +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/framework/volume_group_snapshot_resource.go b/vendor/k8s.io/kubernetes/test/e2e/storage/framework/volume_group_snapshot_resource.go index 02d0a2cb8a2f..842b2da3c68b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/framework/volume_group_snapshot_resource.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/framework/volume_group_snapshot_resource.go @@ -21,7 +21,7 @@ import ( "fmt" "github.com/onsi/ginkgo/v2" - + "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/kubernetes/test/e2e/framework" @@ -56,15 +56,15 @@ type VolumeGroupSnapshotResource struct { Config *PerTestConfig Pattern TestPattern - Vgs *unstructured.Unstructured - Vgscontent *unstructured.Unstructured - Vgsclass *unstructured.Unstructured + VGS *unstructured.Unstructured + VGSContent *unstructured.Unstructured + VGSClass *unstructured.Unstructured } // CreateVolumeGroupSnapshot creates a VolumeGroupSnapshotClass with given SnapshotDeletionPolicy and a VolumeGroupSnapshot // from the VolumeGroupSnapshotClass using a dynamic client. // Returns the unstructured VolumeGroupSnapshotClass and VolumeGroupSnapshot objects. -func CreateVolumeGroupSnapshot(ctx context.Context, sDriver VoulmeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, groupName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) (*unstructured.Unstructured, *unstructured.Unstructured) { +func CreateVolumeGroupSnapshot(ctx context.Context, sDriver VoulmeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, groupName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) (*unstructured.Unstructured, *unstructured.Unstructured, *unstructured.Unstructured) { defer ginkgo.GinkgoRecover() var err error if pattern.SnapshotType != VolumeGroupSnapshot { @@ -99,28 +99,35 @@ func CreateVolumeGroupSnapshot(ctx context.Context, sDriver VoulmeGroupSnapshott ginkgo.By("Getting group snapshot and content") volumeGroupSnapshot, err = dc.Resource(utils.VolumeGroupSnapshotGVR).Namespace(volumeGroupSnapshot.GetNamespace()).Get(ctx, volumeGroupSnapshot.GetName(), metav1.GetOptions{}) framework.ExpectNoError(err, "Failed to get volume group snapshot after creation") - - return gsclass, volumeGroupSnapshot + status := volumeGroupSnapshot.Object["status"] + err = framework.Gomega().Expect(status).NotTo(gomega.BeNil()) + framework.ExpectNoError(err, "Failed to get status of volume group snapshot") + vgscName := status.(map[string]interface{})["boundVolumeGroupSnapshotContentName"].(string) + err = framework.Gomega().Expect(vgscName).NotTo(gomega.BeNil()) + framework.ExpectNoError(err, "Failed to get content name of volume group snapshot") + vgsc, err := dc.Resource(utils.VolumeGroupSnapshotContentGVR).Get(ctx, vgscName, metav1.GetOptions{}) + framework.ExpectNoError(err, "failed to get content of group snapshot") + return gsclass, volumeGroupSnapshot, vgsc } // CleanupResource deletes the VolumeGroupSnapshotClass and VolumeGroupSnapshot objects using a dynamic client. 
func (r *VolumeGroupSnapshotResource) CleanupResource(ctx context.Context, timeouts *framework.TimeoutContext) error { defer ginkgo.GinkgoRecover() dc := r.Config.Framework.DynamicClient - err := dc.Resource(utils.VolumeGroupSnapshotClassGVR).Delete(ctx, r.Vgsclass.GetName(), metav1.DeleteOptions{}) + err := dc.Resource(utils.VolumeGroupSnapshotClassGVR).Delete(ctx, r.VGSClass.GetName(), metav1.DeleteOptions{}) framework.ExpectNoError(err, "Failed to delete volume group snapshot class") return nil } // CreateVolumeGroupSnapshotResource creates a VolumeGroupSnapshotResource object with the given parameters. func CreateVolumeGroupSnapshotResource(ctx context.Context, sDriver VoulmeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) *VolumeGroupSnapshotResource { - vgsclass, snapshot := CreateVolumeGroupSnapshot(ctx, sDriver, config, pattern, pvcName, pvcNamespace, timeouts, parameters) + vgsClass, snapshot, vgsc := CreateVolumeGroupSnapshot(ctx, sDriver, config, pattern, pvcName, pvcNamespace, timeouts, parameters) vgs := &VolumeGroupSnapshotResource{ Config: config, Pattern: pattern, - Vgs: snapshot, - Vgsclass: vgsclass, - Vgscontent: nil, + VGS: snapshot, + VGSClass: vgsClass, + VGSContent: vgsc, } return vgs } diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/openshift_csi_volumes.go b/vendor/k8s.io/kubernetes/test/e2e/storage/openshift_csi_volumes.go new file mode 100644 index 000000000000..4ebc74be6e9a --- /dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/openshift_csi_volumes.go @@ -0,0 +1,45 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is a copy of csi_volumes.go with OpenShift specific test driver. +// Used a copy of the file to avoid conflicts when editing the existing file. +package storage + +import ( + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + storageframework "k8s.io/kubernetes/test/e2e/storage/framework" + "k8s.io/kubernetes/test/e2e/storage/testsuites" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +// List of testDrivers to be executed in below loop +var ocpCSITestDrivers = []func() storageframework.TestDriver{ + drivers.InitGroupSnapshotHostpathCSIDriver, +} + +// This executes testSuites for csi volumes. +var _ = utils.SIGDescribe("OCP CSI Volumes", func() { + for _, initDriver := range ocpCSITestDrivers { + curDriver := initDriver() + + args := storageframework.GetDriverNameWithFeatureTags(curDriver) + args = append(args, func() { + storageframework.DefineTestSuites(curDriver, testsuites.CSISuites) + }) + framework.Context(args...) 
+ } +}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_group_snapshottable.go b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_group_snapshottable.go index 7b2277d320f8..ebbd9cc619e6 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_group_snapshottable.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/testsuites/volume_group_snapshottable.go @@ -18,6 +18,8 @@ package testsuites import ( "context" + "crypto/sha256" + "fmt" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -176,18 +178,30 @@ func (s *VolumeGroupSnapshottableTestSuite) DefineTests(driver storageframework. snapshot := storageframework.CreateVolumeGroupSnapshotResource(ctx, snapshottableDriver, groupTest.config, pattern, labelValue, groupTest.volumeGroup[0][0].Pvc.GetNamespace(), f.Timeouts, map[string]string{"deletionPolicy": pattern.SnapshotDeletionPolicy.String()}) groupTest.snapshots = append(groupTest.snapshots, snapshot) ginkgo.By("verifying the snapshots in the group are ready to use") - status := snapshot.Vgs.Object["status"] + status := snapshot.VGS.Object["status"] err := framework.Gomega().Expect(status).NotTo(gomega.BeNil()) framework.ExpectNoError(err, "failed to get status of group snapshot") - volumes := status.(map[string]interface{})["pvcVolumeSnapshotRefList"] - err = framework.Gomega().Expect(volumes).NotTo(gomega.BeNil()) + + volumeListMap := snapshot.VGSContent.Object["status"].(map[string]interface{}) + err = framework.Gomega().Expect(volumeListMap).NotTo(gomega.BeNil()) + framework.ExpectNoError(err, "failed to get volume snapshot list") + volumeSnapshotHandlePairList := volumeListMap["volumeSnapshotHandlePairList"].([]interface{}) + err = framework.Gomega().Expect(volumeSnapshotHandlePairList).NotTo(gomega.BeNil()) framework.ExpectNoError(err, "failed to get volume snapshot list") - volumeList := volumes.([]interface{}) - err = framework.Gomega().Expect(len(volumeList)).To(gomega.Equal(groupTest.numVolumes)) + err = framework.Gomega().Expect(len(volumeSnapshotHandlePairList)).To(gomega.Equal(groupTest.numVolumes)) framework.ExpectNoError(err, "failed to get volume snapshot list") claimSize := groupTest.volumeGroup[0][0].Pvc.Spec.Resources.Requests.Storage().String() - for _, volume := range volumeList { + for _, volume := range volumeSnapshotHandlePairList { // Create a PVC from the snapshot + volumeHandle := volume.(map[string]interface{})["volumeHandle"].(string) + err = framework.Gomega().Expect(volumeHandle).NotTo(gomega.BeNil()) + framework.ExpectNoError(err, "failed to get volume handle from volume") + uid := snapshot.VGSContent.Object["metadata"].(map[string]interface{})["uid"].(string) + err = framework.Gomega().Expect(uid).NotTo(gomega.BeNil()) + framework.ExpectNoError(err, "failed to get uuid from content") + volumeSnapshotName := fmt.Sprintf("snapshot-%x", sha256.Sum256([]byte( + uid+volumeHandle))) + pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ StorageClassName: &groupTest.volumeGroup[0][0].Sc.Name, ClaimSize: claimSize, @@ -198,7 +212,7 @@ func (s *VolumeGroupSnapshottableTestSuite) DefineTests(driver storageframework. pvc.Spec.DataSource = &v1.TypedLocalObjectReference{ APIGroup: &group, Kind: "VolumeSnapshot", - Name: volume.(map[string]interface{})["volumeSnapshotRef"].(map[string]interface{})["name"].(string), + Name: volumeSnapshotName, } volSrc := v1.VolumeSource{ @@ -208,8 +222,9 @@ func (s *VolumeGroupSnapshottableTestSuite) DefineTests(driver storageframework. 
}, }, } - pvc, err := cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{}) + pvc, err = cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create PVC from snapshot") + pod := StartInPodWithVolumeSource(ctx, cs, volSrc, pvc.Namespace, "snapshot-pod", "sleep 300", groupTest.config.ClientNodeSelection) ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, pod) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow), "Pod did not start in expected time") diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go index b8a9fca9c454..b5515e87922e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go @@ -33,15 +33,16 @@ const ( // VolumeGroupSnapshot is the group snapshot api VolumeGroupSnapshotAPIGroup = "groupsnapshot.storage.k8s.io" // VolumeGroupSnapshotAPIVersion is the group snapshot api version - VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1alpha1" + VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1beta1" ) var ( // VolumeGroupSnapshotGVR is GroupVersionResource for volumegroupsnapshots - VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshots"} + VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1beta1", Resource: "volumegroupsnapshots"} // VolumeGroupSnapshotClassGVR is GroupVersionResource for volumegroupsnapshotsclasses - VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshotclasses"} + VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1beta1", Resource: "volumegroupsnapshotclasses"} + VolumeGroupSnapshotContentGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1beta1", Resource: "volumegroupsnapshotcontents"} ) // WaitForVolumeGroupSnapshotReady waits for a VolumeGroupSnapshot to be ready to use or until timeout occurs, whichever comes first. 
diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml index aff3a5719dcb..e552f81b57d5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814" + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150" controller-gen.kubebuilder.io/version: v0.15.0 name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io spec: @@ -31,7 +31,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: description: |- diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml index 28584e56bfcf..a6d15d8adc61 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068" + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150" controller-gen.kubebuilder.io/version: v0.15.0 name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io spec: @@ -53,7 +53,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: description: |- @@ -237,8 +237,6 @@ spec: - message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace must be set rule: has(self.name) && has(self.__namespace__) - - message: volumeGroupSnapshotRef is immutable - rule: self == oldSelf required: - deletionPolicy - driver @@ -257,8 +255,9 @@ spec: The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command date +%s%N returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. - format: int64 - type: integer + This field is the source for the CreationTime field in VolumeGroupSnapshotStatus + format: date-time + type: string error: description: |- Error is the last observed error during group snapshot creation, if any. @@ -276,42 +275,6 @@ spec: format: date-time type: string type: object - pvVolumeSnapshotContentList: - description: |- - PVVolumeSnapshotContentList is the list of pairs of PV and - VolumeSnapshotContent for this group snapshot - The maximum number of allowed snapshots in the group is 100. 
- items: - description: |- - PVVolumeSnapshotContentPair represent a pair of PV names and - VolumeSnapshotContent names - properties: - persistentVolumeRef: - description: PersistentVolumeRef is a reference to the persistent - volume resource - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeSnapshotContentRef: - description: VolumeSnapshotContentRef is a reference to the - volume snapshot content resource - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: array readyToUse: description: |- ReadyToUse indicates if all the individual snapshots in the group are ready to be @@ -325,6 +288,32 @@ spec: If a storage system does not provide such an id, the CSI driver can choose to return the VolumeGroupSnapshot name. type: string + volumeSnapshotHandlePairList: + description: |- + VolumeSnapshotHandlePairList is a list of CSI "volume_id" and "snapshot_id" + pair returned by the CSI driver to identify snapshots and their source volumes + on the storage system. + items: + description: VolumeSnapshotHandlePair defines a pair of a source + volume handle and a snapshot handle + properties: + snapshotHandle: + description: |- + SnapshotHandle is a unique id returned by the CSI driver to identify a volume + snapshot on the storage system + Required. + type: string + volumeHandle: + description: |- + VolumeHandle is a unique id returned by the CSI driver to identify a volume + on the storage system + Required. + type: string + required: + - snapshotHandle + - volumeHandle + type: object + type: array type: object required: - spec diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml index 3d9a771dea9d..145d1211df49 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068" + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150" controller-gen.kubebuilder.io/version: v0.15.0 name: volumegroupsnapshots.groupsnapshot.storage.k8s.io spec: @@ -43,7 +43,7 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1alpha1 + name: v1beta1 schema: openAPIV3Schema: description: |- @@ -198,6 +198,7 @@ spec: The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command date +%s%N returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC. 
+ This field is updated based on the CreationTime field in VolumeGroupSnapshotContentStatus format: date-time type: string error: @@ -221,41 +222,6 @@ spec: format: date-time type: string type: object - pvcVolumeSnapshotRefList: - description: |- - VolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that - is part of this group snapshot. - The maximum number of allowed snapshots in the group is 100. - items: - description: PVCVolumeSnapshotPair defines a pair of a PVC reference - and a Volume Snapshot Reference - properties: - persistentVolumeClaimRef: - description: PersistentVolumeClaimRef is a reference to the - PVC this pair is referring to - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - volumeSnapshotRef: - description: VolumeSnapshotRef is a reference to the VolumeSnapshot - this pair is referring to - properties: - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: array readyToUse: description: |- ReadyToUse indicates if all the individual snapshots in the group are ready diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml index 44fefe22cef2..2713f764ccbf 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml @@ -219,7 +219,7 @@ spec: serviceAccountName: csi-hostpathplugin-sa containers: - name: hostpath - image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0 args: - "--drivername=hostpath.csi.k8s.io" - "--v=5" @@ -354,11 +354,11 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1 + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 args: - -v=5 - --csi-address=/csi/csi.sock - - --enable-volume-group-snapshots=true + - --feature-gates=CSIVolumeGroupSnapshot=true securityContext: # This is necessary only for systems with SELinux, where # non-privileged sidecar containers cannot access unix domain socket diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh index b6386e8bd166..c2b55c3b71b1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh @@ -278,10 +278,15 @@ run_tests() { kubectl apply -f 
test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml || exit 1 kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml || exit 1 + kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml || exit 1 + kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml || exit 1 + kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml || exit 1 + - kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.0.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1 - curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-8.1/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \ -awk '/--leader-election=true/ {print; print " - \"--enable-volume-group-snapshots=true\""; next}1' | \ + kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1 + curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \ +awk '/--leader-election=true/ {print; print " - \"--feature-gates=CSIVolumeGroupSnapshot=true\""; next}1' | \ +sed 's|image: registry.k8s.io/sig-storage/snapshot-controller:v8.0.1|image: registry.k8s.io/sig-storage/snapshot-controller:v8.2.0|' | \ kubectl apply -f - || exit 1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml index 3ac441b937bb..81ff6aa85b1f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml @@ -354,7 +354,7 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 args: - -v=5 - --csi-address=/csi/csi.sock diff --git a/vendor/modules.txt b/vendor/modules.txt index bfee566e15f2..46a6ef7c207a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1061,6 +1061,7 @@ github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout github.com/openshift/library-go/pkg/apiserver/apiserverconfig github.com/openshift/library-go/pkg/apiserver/jsonpatch +github.com/openshift/library-go/pkg/apiserver/node github.com/openshift/library-go/pkg/apps/appsserialization github.com/openshift/library-go/pkg/apps/appsutil github.com/openshift/library-go/pkg/authorization/authorizationutil @@ -1781,7 +1782,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/api v0.32.3 => 
github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1842,7 +1843,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/apiextensions-apiserver v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1892,7 +1893,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/apimachinery v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1965,7 +1966,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/apiserver v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -2148,13 +2149,13 @@ k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/cli-runtime v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/client-go v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -2518,7 +2519,7 @@ k8s.io/client-go/util/retry k8s.io/client-go/util/testing k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/cloud-provider v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -2537,13 +2538,13 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume 
k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/component-base v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -2577,7 +2578,7 @@ k8s.io/component-base/version/verflag k8s.io/component-base/zpages/features k8s.io/component-base/zpages/flagz k8s.io/component-base/zpages/statusz -# k8s.io/component-helpers v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/component-helpers v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/apps/poddisruptionbudget @@ -2591,7 +2592,7 @@ k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/ephemeral k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.32.1 => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/controller-manager v0.32.1 => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -2603,22 +2604,22 @@ k8s.io/controller-manager/pkg/features k8s.io/controller-manager/pkg/features/register k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/cri-api v0.27.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/cri-api v0.27.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1 k8s.io/cri-api/pkg/errors -# k8s.io/cri-client v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/cri-client v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/cri-client/pkg k8s.io/cri-client/pkg/internal k8s.io/cri-client/pkg/logs k8s.io/cri-client/pkg/util -# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins -# k8s.io/dynamic-resource-allocation v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250220043805-86db063ce6f2 +# 
k8s.io/dynamic-resource-allocation v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/dynamic-resource-allocation/api k8s.io/dynamic-resource-allocation/cel @@ -2626,7 +2627,7 @@ k8s.io/dynamic-resource-allocation/kubeletplugin k8s.io/dynamic-resource-allocation/resourceclaim k8s.io/dynamic-resource-allocation/resourceslice k8s.io/dynamic-resource-allocation/structured -# k8s.io/externaljwt v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/externaljwt v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/externaljwt/apis/v1alpha1 # k8s.io/klog/v2 v2.130.1 @@ -2647,7 +2648,7 @@ k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-aggregator v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/kube-aggregator v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install @@ -2706,11 +2707,11 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/kube-scheduler/config/v1 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/kubectl v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/util @@ -2735,7 +2736,7 @@ k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/kubelet v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/kubelet/config/v1 k8s.io/kubelet/config/v1alpha1 @@ -2757,7 +2758,7 @@ k8s.io/kubelet/pkg/cri/streaming k8s.io/kubelet/pkg/cri/streaming/portforward k8s.io/kubelet/pkg/cri/streaming/remotecommand k8s.io/kubelet/pkg/types -# k8s.io/kubernetes v1.32.2 => github.com/openshift/kubernetes v1.30.1-0.20250220043805-86db063ce6f2 +# k8s.io/kubernetes v1.32.2 => github.com/openshift/kubernetes v1.30.1-0.20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/kubernetes/cmd/kube-apiserver/app k8s.io/kubernetes/cmd/kube-apiserver/app/options @@ -2823,6 +2824,7 @@ k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/podnodeconstraints k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/csiinlinevolumesecurity k8s.io/kubernetes/openshift-kube-apiserver/authorization/browsersafe 
+k8s.io/kubernetes/openshift-kube-apiserver/authorization/minimumkubeletversion k8s.io/kubernetes/openshift-kube-apiserver/authorization/scopeauthorizer k8s.io/kubernetes/openshift-kube-apiserver/configdefault k8s.io/kubernetes/openshift-kube-apiserver/enablement @@ -3580,10 +3582,10 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/traverse k8s.io/kubernetes/third_party/forked/libcontainer/apparmor k8s.io/kubernetes/third_party/forked/libcontainer/utils -# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/mount-utils -# k8s.io/pod-security-admission v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/pod-security-admission v0.32.3 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/pod-security-admission/admission k8s.io/pod-security-admission/admission/api @@ -3596,7 +3598,7 @@ k8s.io/pod-security-admission/admission/api/validation k8s.io/pod-security-admission/api k8s.io/pod-security-admission/metrics k8s.io/pod-security-admission/policy -# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250320083338-1601b9e27d85 ## explicit; go 1.23.0 k8s.io/sample-apiserver/pkg/apis/wardle k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1 @@ -3839,35 +3841,35 @@ sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 # github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 -# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250220043805-86db063ce6f2 -# 
k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250220043805-86db063ce6f2 -# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250220043805-86db063ce6f2 -# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250220043805-86db063ce6f2 +# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250320083338-1601b9e27d85 +# 
k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250320083338-1601b9e27d85 +# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250320083338-1601b9e27d85 +# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250320083338-1601b9e27d85 # github.com/containerd/errdefs => github.com/containerd/errdefs v0.1.0 From a3059048adf4c00e39bbbfff783f7b45d7e34dc7 Mon Sep 17 00:00:00 2001 From: Allen Ray Date: Thu, 20 Mar 2025 10:12:21 -0400 Subject: [PATCH 4/4] Run make update --- test/extended/util/image/zz_generated.txt | 25 +- zz_generated.manifests/test-reporting.yaml | 1149 ++++++++++++++++++++ 2 files changed, 1160 insertions(+), 14 deletions(-) diff --git a/test/extended/util/image/zz_generated.txt b/test/extended/util/image/zz_generated.txt index 9c040a60fbae..3ec93589b28f 100644 --- a/test/extended/util/image/zz_generated.txt +++ b/test/extended/util/image/zz_generated.txt @@ -7,11 +7,11 @@ quay.io/redhat-developer/nfs-server:1.1 
quay.io/openshift/community-e2e-images:e quay.io/redhat-developer/test-build-roots2i:1.2 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-test-build-roots2i-1-2-gLJ7WcnS2TSllJ32 quay.io/redhat-developer/test-build-simples2i:1.2 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-test-build-simples2i-1-2-thirLMR-JKplfkmE registry.k8s.io/build-image/distroless-iptables:v0.6.8 quay.io/openshift/community-e2e-images:e2e-8-registry-k8s-io-build-image-distroless-iptables-v0-6-8-9OClvbaDfYqoqVhI -registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2 quay.io/openshift/community-e2e-images:e2e-48-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-2-2-fk3Ddr8np00iPF9c -registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.4.0 quay.io/openshift/community-e2e-images:e2e-46-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-4-0-mUHHjVVuv0UQiTyf +registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2 quay.io/openshift/community-e2e-images:e2e-47-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-2-2-fk3Ddr8np00iPF9c +registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.4.0 quay.io/openshift/community-e2e-images:e2e-45-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-4-0-mUHHjVVuv0UQiTyf registry.k8s.io/e2e-test-images/agnhost:2.53 quay.io/openshift/community-e2e-images:e2e-1-registry-k8s-io-e2e-test-images-agnhost-2-53-S5hiptYgC5MyFXZH registry.k8s.io/e2e-test-images/apparmor-loader:1.4 quay.io/openshift/community-e2e-images:e2e-4-registry-k8s-io-e2e-test-images-apparmor-loader-1-4-m-K7F-syWFeA4t03 -registry.k8s.io/e2e-test-images/busybox:1.29-2 quay.io/openshift/community-e2e-images:e2e-53-registry-k8s-io-e2e-test-images-busybox-1-29-2-ZYWRth-o9U_JR2ZE +registry.k8s.io/e2e-test-images/busybox:1.29-2 quay.io/openshift/community-e2e-images:e2e-50-registry-k8s-io-e2e-test-images-busybox-1-29-2-ZYWRth-o9U_JR2ZE registry.k8s.io/e2e-test-images/busybox:1.36.1-1 quay.io/openshift/community-e2e-images:e2e-7-registry-k8s-io-e2e-test-images-busybox-1-36-1-1-n3BezCOfxp98l84K registry.k8s.io/e2e-test-images/httpd:2.4.38-4 quay.io/openshift/community-e2e-images:e2e-10-registry-k8s-io-e2e-test-images-httpd-2-4-38-4-lYFH2l3oSS5xEICa registry.k8s.io/e2e-test-images/httpd:2.4.39-4 quay.io/openshift/community-e2e-images:e2e-11-registry-k8s-io-e2e-test-images-httpd-2-4-39-4-Hgo23C6O-Y8DPv5N @@ -35,22 +35,19 @@ registry.k8s.io/e2e-test-images/volume/iscsi:2.6 quay.io/openshift/community-e2e registry.k8s.io/e2e-test-images/volume/nfs:1.4 quay.io/openshift/community-e2e-images:e2e-30-registry-k8s-io-e2e-test-images-volume-nfs-1-4-u7V8iW5QIcWM2i6h registry.k8s.io/etcd:3.5.16-0 quay.io/openshift/community-e2e-images:e2e-9-registry-k8s-io-etcd-3-5-16-0-ExW1ETJqOZa6gx2F registry.k8s.io/sig-storage/csi-attacher:v4.6.1 quay.io/openshift/community-e2e-images:e2e-38-registry-k8s-io-sig-storage-csi-attacher-v4-6-1-NP4z4EcSo-N1xk_4 -registry.k8s.io/sig-storage/csi-attacher:v4.8.0 quay.io/openshift/community-e2e-images:e2e-44-registry-k8s-io-sig-storage-csi-attacher-v4-8-0-S1cGDJYg9N-xpVnU +registry.k8s.io/sig-storage/csi-attacher:v4.8.0 quay.io/openshift/community-e2e-images:e2e-43-registry-k8s-io-sig-storage-csi-attacher-v4-8-0-S1cGDJYg9N-xpVnU registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.12.1 
quay.io/openshift/community-e2e-images:e2e-35-registry-k8s-io-sig-storage-csi-external-health-monitor-controller-v0-12-1--7VXdNUMsJt30kdU registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 quay.io/openshift/community-e2e-images:e2e-36-registry-k8s-io-sig-storage-csi-node-driver-registrar-v2-10-1-bVz-v06gRSvh6Rp3 -registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0 quay.io/openshift/community-e2e-images:e2e-50-registry-k8s-io-sig-storage-csi-node-driver-registrar-v2-13-0-Yz3cC3wjWoQESAfV -registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 quay.io/openshift/community-e2e-images:e2e-47-registry-k8s-io-sig-storage-csi-node-driver-registrar-v2-5-1-PNOgzdxTQbWunm33 +registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0 quay.io/openshift/community-e2e-images:e2e-48-registry-k8s-io-sig-storage-csi-node-driver-registrar-v2-13-0-Yz3cC3wjWoQESAfV +registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 quay.io/openshift/community-e2e-images:e2e-46-registry-k8s-io-sig-storage-csi-node-driver-registrar-v2-5-1-PNOgzdxTQbWunm33 registry.k8s.io/sig-storage/csi-provisioner:v5.0.1 quay.io/openshift/community-e2e-images:e2e-39-registry-k8s-io-sig-storage-csi-provisioner-v5-0-1-wPw2vjyYX1LWVmkn -registry.k8s.io/sig-storage/csi-provisioner:v5.1.0 quay.io/openshift/community-e2e-images:e2e-43-registry-k8s-io-sig-storage-csi-provisioner-v5-1-0-9nVNb-KrN4Qb7WGv +registry.k8s.io/sig-storage/csi-provisioner:v5.1.0 quay.io/openshift/community-e2e-images:e2e-42-registry-k8s-io-sig-storage-csi-provisioner-v5-1-0-9nVNb-KrN4Qb7WGv registry.k8s.io/sig-storage/csi-resizer:v1.11.1 quay.io/openshift/community-e2e-images:e2e-40-registry-k8s-io-sig-storage-csi-resizer-v1-11-1-6jB55ZThgstz1GrW -registry.k8s.io/sig-storage/csi-resizer:v1.13.1 quay.io/openshift/community-e2e-images:e2e-45-registry-k8s-io-sig-storage-csi-resizer-v1-13-1-YKcEWbi0FydNavn_ -registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1 quay.io/openshift/community-e2e-images:e2e-41-registry-k8s-io-sig-storage-csi-snapshotter-v8-0-1-vAVT_GHf7Vm-TXyx -registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0 quay.io/openshift/community-e2e-images:e2e-52-registry-k8s-io-sig-storage-csi-snapshotter-v8-1-0-3cVspluN_7tfQqYd -registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 quay.io/openshift/community-e2e-images:e2e-42-registry-k8s-io-sig-storage-csi-snapshotter-v8-2-0-d_U3bM1nPZDqelWL +registry.k8s.io/sig-storage/csi-resizer:v1.13.1 quay.io/openshift/community-e2e-images:e2e-44-registry-k8s-io-sig-storage-csi-resizer-v1-13-1-YKcEWbi0FydNavn_ +registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0 quay.io/openshift/community-e2e-images:e2e-41-registry-k8s-io-sig-storage-csi-snapshotter-v8-2-0-d_U3bM1nPZDqelWL registry.k8s.io/sig-storage/hello-populator:v1.0.1 quay.io/openshift/community-e2e-images:e2e-32-registry-k8s-io-sig-storage-hello-populator-v1-0-1-Ei7libli17J5IWn- -registry.k8s.io/sig-storage/hostpathplugin:v1.14.0 quay.io/openshift/community-e2e-images:e2e-34-registry-k8s-io-sig-storage-hostpathplugin-v1-14-0-LWjla55lyZB4CQu0 -registry.k8s.io/sig-storage/hostpathplugin:v1.15.0 quay.io/openshift/community-e2e-images:e2e-49-registry-k8s-io-sig-storage-hostpathplugin-v1-15-0-YS6opQN6AdImbOb6 +registry.k8s.io/sig-storage/hostpathplugin:v1.15.0 quay.io/openshift/community-e2e-images:e2e-34-registry-k8s-io-sig-storage-hostpathplugin-v1-15-0-YS6opQN6AdImbOb6 registry.k8s.io/sig-storage/livenessprobe:v2.12.0 quay.io/openshift/community-e2e-images:e2e-37-registry-k8s-io-sig-storage-livenessprobe-v2-12-0-wCYz5fsB0ew8MCS0 
-registry.k8s.io/sig-storage/livenessprobe:v2.15.0 quay.io/openshift/community-e2e-images:e2e-51-registry-k8s-io-sig-storage-livenessprobe-v2-15-0-4bLc1k1ifxb_KkX9 +registry.k8s.io/sig-storage/livenessprobe:v2.15.0 quay.io/openshift/community-e2e-images:e2e-49-registry-k8s-io-sig-storage-livenessprobe-v2-15-0-4bLc1k1ifxb_KkX9 registry.k8s.io/sig-storage/nfs-provisioner:v4.0.8 quay.io/openshift/community-e2e-images:e2e-17-registry-k8s-io-sig-storage-nfs-provisioner-v4-0-8-W5pbwDbNliDm1x4k registry.k8s.io/sig-storage/volume-data-source-validator:v1.0.0 quay.io/openshift/community-e2e-images:e2e-33-registry-k8s-io-sig-storage-volume-data-source-validator-v1-0-0-pJwTeQGTDmAV8753 diff --git a/zz_generated.manifests/test-reporting.yaml b/zz_generated.manifests/test-reporting.yaml index ecaae900dc10..fe39c7454837 100644 --- a/zz_generated.manifests/test-reporting.yaml +++ b/zz_generated.manifests/test-reporting.yaml @@ -391,6 +391,11 @@ spec: installation should fail to install a non-existing cluster extension' - testName: '[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1 operator installation should install a cluster extension' + - featureGate: OrderedNamespaceDeletion + tests: + - testName: '[sig-api-machinery] OrderedNamespaceDeletion namespace deletion should + delete pod first [Feature:OrderedNamespaceDeletion] [FeatureGate:OrderedNamespaceDeletion] + [Beta]' - featureGate: PersistentIPsForVirtualization tests: - testName: '[sig-network][OCPFeatureGate:PersistentIPsForVirtualization][Feature:Layer2LiveMigration] @@ -619,6 +624,42 @@ spec: - testName: '[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should create a volume with VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume that already has a VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume with no VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should create a volume with VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume that already has a VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should 
modify volume with no VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should create a volume with VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume that already has a VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume with no VAC' - testName: '[sig-storage] VolumeAttributesClass [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should run through the lifecycle of a VolumeAttributesClass' @@ -630,4 +671,1112 @@ spec: unbound pvc count metrics for pvc controller with volume attributes class dimension after creating pvc only [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta]' + - featureGate: VolumeGroupSnapshot + tests: + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: (delete policy)] volumegroupsnapshottable + [Feature:volumegroupsnapshot] VolumeGroupSnapshottable should create snapshots + for multiple volumes in a pod' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default + fs)] ephemeral should create read-only inline ephemeral volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default + fs)] ephemeral should create read/write inline ephemeral volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default + fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default + fs)] ephemeral should support multiple inline ephemeral volumes' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: CSI Ephemeral-volume (default + fs)] ephemeral should support two pods which have the same volume definition' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] + volume-expand Verify if offline PVC expansion works' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] + volume-expand should resize volume when PVC is edited while pod is using it' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + disruptive [Disruptive] [LinuxOnly] Should 
test that pv used in a pod that + is deleted while the kubelet is down cleans up when the kubelet returns.' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that + is force deleted while the kubelet is down cleans up when the kubelet returns.' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet + restart is readable after restart.' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should access to two volumes with different volume mode + and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should access to two volumes with different volume mode + and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should access to two volumes with the same volume mode + and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should access to two volumes with the same volume mode + and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should concurrently access the single read-only volume + from pods on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should concurrently access the single volume from pods + on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should concurrently access the single volume from pods + on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should concurrently access the volume and its clone from + pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + multiVolume [Slow] should concurrently access the volume and restored snapshot + from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] + [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should mount multiple PV 
pointing to the same storage on the + same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision correct filesystem size when restoring snapshot + to larger size pvc [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with any volume data source [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with mount options' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with pvc data source' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with pvc data source (ROX mode)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with pvc data source in parallel [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with snapshot data source (ROX mode) + [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + pvc-deletion-performance should delete volumes at scale within performance + constraints [Slow] [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-expand should not allow expansion of pvcs without AllowVolumeExpansion + property' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should create a volume with VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume that already has a VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume with no VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + 
[OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volume-stress multiple pods should access different volumes repeatedly [Slow] + [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volumeMode should fail to use a volume in a pod with mismatched mode [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volumeMode should not mount / map unused volumes in a pod [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (block volmode)] + volumes should store data' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)(allowExpansion)] + volume-expand Verify if offline PVC expansion works' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)(allowExpansion)] + volume-expand should resize volume when PVC is edited while pod is using it' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + capacity provides storage capacity information' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, + new pod fsgroup applied to volume contents' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, + volume contents ownership changed via chgrp in first pod, new pod with different + fsgroup applied to the volume contents' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, + volume contents ownership changed via chgrp in first pod, new pod with same + fsgroup applied to the volume contents' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (Always)[LinuxOnly], rwop pod created with an initial + fsgroup, new pod fsgroup applied to volume contents' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial + fsgroup, new pod fsgroup applied to volume contents' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial + fsgroup, volume contents ownership changed via chgrp in first pod, new pod + with different fsgroup applied to the volume contents' + - testName: '[sig-storage] OCP CSI Volumes 
[Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial + fsgroup, volume contents ownership changed via chgrp in first pod, new pod + with same fsgroup skips ownership changes to the volume contents' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should mount multiple PV pointing to the same storage on the + same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision correct filesystem size when restoring snapshot + to larger size pvc [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with any volume data source [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with mount options' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with pvc data source' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with pvc data source (ROX mode)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with pvc data source in parallel [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with snapshot data source (ROX mode) + [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod + from using an in-use ReadWriteOncePod volume on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority + pods using ReadWriteOncePod volumes' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV 
(default fs)] + subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should fail if subpath with backstepping is outside the volume [Slow] + [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support creating multiple subpath from same volumes [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support existing directories when readOnly specified in the + volumeSource' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support existing directory' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support existing single file [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support file as subpath [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support non-existent path' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support readOnly directory specified in the volumeMount' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support readOnly file specified in the volumeMount [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support restarting containers using directory as subpath [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should support restarting containers using file as subpath [Slow] + [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should unmount if pod is force deleted while kubelet is down [Disruptive] + [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV 
(default fs)] + subPath should unmount if pod is gracefully deleted while kubelet is down + [Disruptive] [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + subPath should verify container cannot write to subpath readonly volumes [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-expand should not allow expansion of pvcs without AllowVolumeExpansion + property' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should create a volume with VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume that already has a VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume with no VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volume-stress multiple pods should access different volumes repeatedly [Slow] + [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volumeIO should write files of various sizes, verify size, validate content + [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volumes should allow exec of files on the volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (default fs)] + volumes should store data' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (delayed binding)] + topology should fail to schedule a pod which has topologies that conflict + with AllowedTopologies' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (delayed binding)] + topology should provision a volume and schedule a pod with AllowedTopologies' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext3)] volumes + should allow exec of files on the volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext3)] volumes + should store data' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should access to two volumes with different volume mode and retain + data across pod 
recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should access to two volumes with different volume mode and retain + data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should access to two volumes with the same volume mode and retain data + across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should access to two volumes with the same volume mode and retain data + across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should concurrently access the single read-only volume from pods on + the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should concurrently access the single volume from pods on different + node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should concurrently access the single volume from pods on the same + node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should concurrently access the volume and its clone from pods on the + same node [LinuxOnly] [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] multiVolume + [Slow] should concurrently access the volume and restored snapshot from pods + on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] volumes + should allow exec of files on the volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ext4)] volumes + should store data' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in + a pod that is deleted while the kubelet is down is usable by a new pod when + kubelet returns [Feature:SELinux]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in + a pod that is deleted while the kubelet is down is usable by a new pod with + a different SELinux context when kubelet returns [Feature:SELinux]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic 
PV (filesystem + volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in + a pod that is force deleted while the kubelet is down is usable by a new pod + when kubelet returns [Feature:SELinux]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in + a pod that is force deleted while the kubelet is down is usable by a new pod + with a different SELinux context when kubelet returns [Feature:SELinux]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written + before kubelet restart is readable after restart.' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should access to two volumes with different volume + mode and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should access to two volumes with different volume + mode and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should access to two volumes with the same volume + mode and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should access to two volumes with the same volume + mode and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should concurrently access the single read-only + volume from pods on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should concurrently access the single volume + from pods on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should concurrently access the single volume + from pods on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should concurrently access the volume and its + clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] multiVolume [Slow] should concurrently access the volume and restored + snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] + 
[Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] volume-lifecycle-performance should provision volumes at scale within + performance constraints [Slow] [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] volumeLimits should support volume limits [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] volumeLimits should verify that all csinodes have volume limits' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] volumeMode should fail to use a volume in a pod with mismatched + mode [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (filesystem + volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (immediate binding)] + topology should fail to schedule a pod which has topologies that conflict + with AllowedTopologies' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (immediate binding)] + topology should provision a volume and schedule a pod with AllowedTopologies' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] + [Feature:Windows] volume-expand Verify if offline PVC expansion works' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] + [Feature:Windows] volume-expand should resize volume when PVC is edited while + pod is using it' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should access to two volumes with different volume mode + and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should access to two volumes with different volume mode + and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should access to two volumes with the same volume mode + and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should access to two volumes with the same volume mode + and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI 
Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should concurrently access the single read-only volume + from pods on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should concurrently access the single volume from pods + on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should concurrently access the single volume from pods + on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should concurrently access the volume and its clone from + pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + multiVolume [Slow] should concurrently access the volume and restored snapshot + from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] + [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should mount multiple PV pointing to the same storage on the + same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision correct filesystem size when restoring snapshot + to larger size pvc [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with any volume data source [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with mount options' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with pvc data source' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with pvc data source (ROX mode)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with pvc data source in parallel [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with snapshot data source (ROX mode) + [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP 
CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should fail if subpath with backstepping is outside the volume [Slow] + [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support creating multiple subpath from same volumes [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support existing directories when readOnly specified in the + volumeSource' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support existing directory' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support existing single file [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support file as subpath [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support non-existent path' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support readOnly directory specified in the volumeMount' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support readOnly file specified in the volumeMount [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + 
[OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support restarting containers using directory as subpath [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should support restarting containers using file as subpath [Slow] + [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should unmount if pod is force deleted while kubelet is down [Disruptive] + [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should unmount if pod is gracefully deleted while kubelet is down + [Disruptive] [Slow] [LinuxOnly]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + subPath should verify container cannot write to subpath readonly volumes [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-expand should not allow expansion of pvcs without AllowVolumeExpansion + property' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should create a volume with VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume that already has a VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] + [Beta] should modify volume with no VAC' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volumeIO should write files of various sizes, verify size, validate content + [Slow]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volumes should allow exec of files on the volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] + volumes should store data' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should access to two volumes with different volume mode + and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] 
[Slow] + multiVolume [Slow] should access to two volumes with different volume mode + and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should access to two volumes with the same volume mode + and retain data across pod recreation on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should access to two volumes with the same volume mode + and retain data across pod recreation on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should concurrently access the single read-only volume + from pods on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should concurrently access the single volume from pods + on different node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should concurrently access the single volume from pods + on the same node' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should concurrently access the volume and its clone from + pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + multiVolume [Slow] should concurrently access the volume and restored snapshot + from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] + [Feature:VolumeSourceXFS]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + volumes should allow exec of files on the volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic PV (xfs)] [Slow] + volumes should store data' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (delete + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works after + modifying source data, check deletion (persistent)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (delete + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works, check + deletion (ephemeral)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (delete + policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support + snapshotting of many volumes repeatedly 
[Slow] [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (retain + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works after + modifying source data, check deletion (persistent)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (retain + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works, check + deletion (ephemeral)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Dynamic Snapshot (retain + policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support + snapshotting of many volumes repeatedly [Slow] [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (delete + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works after + modifying source data, check deletion (persistent)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (delete + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works, check + deletion (ephemeral)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (retain + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works after + modifying source data, check deletion (persistent)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Ephemeral Snapshot (retain + policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot + controller should check snapshot fields, check restore correctly works, check + deletion (ephemeral)' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral + volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (block volmode) (late-binding)] ephemeral should create read/write inline + ephemeral volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (block volmode) (late-binding)] ephemeral should support expansion of pvcs + created for ephemeral pvcs' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral + volumes' + - testName: '[sig-storage] OCP CSI Volumes [Driver: 
csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (block volmode) (late-binding)] ephemeral should support two pods which have + the same volume definition' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (immediate-binding)] ephemeral should create read-only inline + ephemeral volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (immediate-binding)] ephemeral should create read/write inline + ephemeral volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (immediate-binding)] ephemeral should support expansion of pvcs + created for ephemeral pvcs' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (immediate-binding)] ephemeral should support multiple inline + ephemeral volumes' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (immediate-binding)] ephemeral should support two pods which + have the same volume definition' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (late-binding)] ephemeral should create read-only inline ephemeral + volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (late-binding)] ephemeral should create read/write inline ephemeral + volume' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (late-binding)] ephemeral should support expansion of pvcs created + for ephemeral pvcs' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (late-binding)] ephemeral should support multiple inline ephemeral + volumes' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs) (late-binding)] ephemeral should support two pods which have + the same volume definition' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs)] volumeLimits should support volume limits [Serial]' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Generic Ephemeral-volume + (default fs)] volumeLimits should verify that all csinodes have volume limits' + - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] + [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default + fs)] subPath should be able to unmount after the subpath directory is deleted + [LinuxOnly]' + - testName: 
'[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support existing directory'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (default fs)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext3)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ext4)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)'
+ - testName: '[sig-storage] OCP CSI Volumes [Driver: csi-hostpath-groupsnapshot] [OCPFeatureGate:VolumeGroupSnapshot] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)'
status: {}