package main

import (
	et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
	"k8s.io/apimachinery/pkg/util/sets"
)

// filterOutDisabledSpecs returns the given specs with all disabled specs removed.
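//
// A minimal usage sketch; the wiring below is an assumption for illustration
// (the ginkgo helper and the ext extension value come from the
// openshift-tests-extension packages and are not part of this file):
//
//	specs, err := ginkgo.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
//	if err != nil {
//		panic(err)
//	}
//	ext.AddSpecs(filterOutDisabledSpecs(specs))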
func filterOutDisabledSpecs(specs et.ExtensionTestSpecs) et.ExtensionTestSpecs {
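	// disabledByReason maps a human-readable reason to substrings of test names;
	// any spec whose name contains one of these substrings is filtered out.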
	var disabledByReason = map[string][]string{
		"Alpha": { // alpha features that are not gated
			"[Feature:StorageVersionAPI]",
			"[Feature:ClusterTrustBundle]",
			"[Feature:SELinuxMount]",
			"[FeatureGate:SELinuxMount]",
			"[Feature:UserNamespacesPodSecurityStandards]",
			"[Feature:DynamicResourceAllocation]",
			"[Feature:VolumeAttributesClass]", // disabled Beta
			"[sig-cli] Kubectl client Kubectl prune with applyset should apply and prune objects", // Alpha feature since k8s 1.27
			// 4.19
			"[Feature:PodLevelResources]",
			"[Feature:PodLogsQuerySplitStreams]",
			"[Feature:volumegroupsnapshot]",
			// 4.20
			"[Feature:OffByDefault]",
		},
		// tests for features that are not implemented in openshift
		"Unimplemented": {
			"Monitoring", // Not installed, should be
			"Cluster level logging", // Not installed yet
			"Kibana", // Not installed
			"Ubernetes", // Can't set zone labels today
			"kube-ui", // Not installed by default
			"Kubernetes Dashboard", // Not installed by default (also probably slow image pull)
			"should proxy to cadvisor", // we don't expose cAdvisor port directly for security reasons
			"[Feature:BootstrapTokens]", // we don't serve cluster-info configmap
			"[Feature:KubeProxyDaemonSetMigration]", // upgrades are run separately
			"[Feature:BoundServiceAccountTokenVolume]", // upgrades are run separately
			"[Feature:StatefulUpgrade]", // upgrades are run separately
		},
		// tests that rely on special configuration that we do not yet support
		"SpecialConfig": {
			// GPU node needs to be available
			"[Feature:GPUDevicePlugin]",
			"[sig-scheduling] GPUDevicePluginAcrossRecreate [Feature:Recreate]",

			"[Feature:LocalStorageCapacityIsolation]", // relies on a separate daemonset?
			"[sig-cloud-provider-gcp]", // these tests require a different configuration; note that GCE tests were moved from sig-cluster-lifecycle to sig-cloud-provider-gcp, see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb

			"kube-dns-autoscaler", // Don't run kube-dns
			"should check if Kubernetes master services is included in cluster-info", // Don't run kube-dns
			"DNS configMap", // this tests dns federation configuration via configmap, which we don't support yet

			"NodeProblemDetector", // requires a non-master node to run on
			"Advanced Audit should audit API calls", // expects to be able to call /logs

			"Firewall rule should have correct firewall rules for e2e cluster", // Upstream-install specific

			// https://bugzilla.redhat.com/show_bug.cgi?id=2079958
			"[sig-network] [Feature:Topology Hints] should distribute endpoints evenly",

			// These tests require SSH configuration and are part of the parallel suite, which does not create the bastion
			// host. Enabling them would result in the bastion being created for every parallel test execution.
			// Given that we have existing oc and WMCO tests that cover this functionality, we can safely disable it.
			"[Feature:NodeLogQuery]",
		},
		// tests that are known broken and need to be fixed upstream or in openshift
		// always add an issue here
		"Broken": {
			"mount an API token into pods", // We add 6 secrets, not 1
			"ServiceAccounts should ensure a single API token exists", // We create lots of secrets
			"unchanging, static URL paths for kubernetes api services", // the test needs to exclude URLs that are not part of conformance (/logs)
			"Services should be able to up and down services", // we don't have wget installed on nodes
			"KubeProxy should set TCP CLOSE_WAIT timeout", // the test requires communication to port 11302 on the cluster nodes
			"should check kube-proxy urls", // previously skipped because we reported -1 as the number of nodes; now we report the proper number and the test fails
			"SSH", // TRIAGE
			"should implement service.kubernetes.io/service-proxy-name", // optional test that requires SSH; owned by sig-network
			"recreate nodes and ensure they function upon restart", // https://bugzilla.redhat.com/show_bug.cgi?id=1756428
			"[Driver: iscsi]", // https://bugzilla.redhat.com/show_bug.cgi?id=1711627

			"RuntimeClass should reject",

			"Services should implement service.kubernetes.io/headless", // requires SSH access to function, needs to be refactored
			"ClusterDns [Feature:Example] should create pod that uses dns", // doesn't use bindata, not part of kube test binary
			"Simple pod should return command exit codes should handle in-cluster config", // kubectl cp doesn't work or does not preserve the executable bit; we already have this test

			// TODO(node): configure the cri handler for the runtime class to make this work
			"should run a Pod requesting a RuntimeClass with a configured handler",
			"should reject a Pod requesting a RuntimeClass with conflicting node selector",
			"should run a Pod requesting a RuntimeClass with scheduling",

			// A fix is in progress: https://github.com/openshift/origin/pull/24709
			"Multi-AZ Clusters should spread the pods of a replication controller across zones",

			// Upstream assumes all control plane pods are in the kube-system namespace and we should revert the change
			// https://github.com/kubernetes/kubernetes/commit/176c8e219f4c7b4c15d34b92c50bfa5ba02b3aba#diff-28a3131f96324063dd53e17270d435a3b0b3bd8f806ee0e33295929570eab209R78
			"MetricsGrabber should grab all metrics from a Kubelet",
			"MetricsGrabber should grab all metrics from API server",
			"MetricsGrabber should grab all metrics from a ControllerManager",
			"MetricsGrabber should grab all metrics from a Scheduler",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1906808
			"ServiceAccounts should support OIDC discovery of service account issuer",

			// NFS umount is broken in kernels 5.7+
			// https://bugzilla.redhat.com/show_bug.cgi?id=1854379
			"[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1986306
			"[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1980141
			"Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector",
			"Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector",
			"Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions",
			"Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions",
			"Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector",
			"Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces",
			"Netpol NetworkPolicy between server and client should enforce updated policy",
			"Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors",
			"Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors",
			"Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector",
			"Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy",
			"Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports",
			"Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector",

			"Topology Hints should distribute endpoints evenly",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1908645
			"[sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork",
			"[sig-network] Networking Granular Checks: Services should function for pod-Service(hostNetwork)",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1952460
			"[sig-network] Firewall rule control plane should not expose well-known ports",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1988272
			"[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6]",
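			// "provider" in the next entry is kept verbatim; it appears to match the upstream test name as written.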
			"[sig-network] Networking should provider Internet connection for containers using DNS",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1957894
			"[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1952457
			"[sig-node] crictl should be able to run crictl on the node",

			// https://bugzilla.redhat.com/show_bug.cgi?id=1953478
			"[sig-storage] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV",

			// https://issues.redhat.com/browse/OCPBUGS-34577
			"[sig-storage] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs",

			// https://issues.redhat.com/browse/OCPBUGS-34594
			"[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action",

			// https://issues.redhat.com/browse/OCPBUGS-38839
			"[sig-network] Traffic Distribution",

			// https://issues.redhat.com/browse/OCPBUGS-45273
			"[sig-network] Services should implement NodePort and HealthCheckNodePort correctly when ExternalTrafficPolicy changes",
		},
		// tests that need to be temporarily disabled while the rebase is in progress.
		"RebaseInProgress": {
			// https://issues.redhat.com/browse/OCPBUGS-7297
			"DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy",

			// https://issues.redhat.com/browse/OCPBUGS-45275
			"[sig-network] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod",

			// https://issues.redhat.com/browse/OCPBUGS-17194
			"[sig-node] ImageCredentialProvider [Feature:KubeletCredentialProviders] should be able to create pod with image credentials fetched from external credential provider",
		},
		// tests that may work, but we don't support them
		"Unsupported": {
			"[Driver: rbd]", // OpenShift 4.x does not support Ceph RBD (use CSI instead)
			"[Driver: ceph]", // OpenShift 4.x does not support CephFS (use CSI instead)
			"[Driver: gluster]", // OpenShift 4.x does not support Gluster
			"Volumes GlusterFS", // OpenShift 4.x does not support Gluster
			"GlusterDynamicProvisioner", // OpenShift 4.x does not support Gluster

			// Also, our CI doesn't support topology, so disable those tests
			"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies",
			"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies",
			"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies",
			"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies",
		},
	}

	var disabledSpecs et.ExtensionTestSpecs
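	// For each reason bucket, build one select function per disabled substring,
	// then collect every spec whose name matches any of them.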
	for _, disabledList := range disabledByReason {
		var selectFunctions []et.SelectFunction
		for _, disabledName := range disabledList {
			selectFunctions = append(selectFunctions, et.NameContains(disabledName))
		}

		disabledSpecs = append(disabledSpecs, specs.SelectAny(selectFunctions)...)
	}

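	// Deduplicate by name: a spec may match entries from several buckets, and a
	// set gives O(1) lookups during the filtering pass below.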
	disabledNames := sets.New[string]()
	for _, disabledSpec := range disabledSpecs {
		disabledNames.Insert(disabledSpec.Name)
	}

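	// Filter in place: specs[:0] shares the backing array with specs, so the
	// enabled specs are compacted to the front without a new allocation (this
	// overwrites the caller's slice contents).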
	enabledSpecs := specs[:0]
	for _, spec := range specs {
		if !disabledNames.Has(spec.Name) {
			enabledSpecs = append(enabledSpecs, spec)
		}
	}

	return enabledSpecs
}