|
| 1 | +package arbiter_topology |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + "fmt" |
| 6 | + "time" |
| 7 | + |
| 8 | + g "github.com/onsi/ginkgo/v2" |
| 9 | + o "github.com/onsi/gomega" |
| 10 | + |
| 11 | + v1 "github.com/openshift/api/config/v1" |
| 12 | + exutil "github.com/openshift/origin/test/extended/util" |
| 13 | + appv1 "k8s.io/api/apps/v1" |
| 14 | + corev1 "k8s.io/api/core/v1" |
| 15 | + "k8s.io/apimachinery/pkg/api/resource" |
| 16 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 17 | + "k8s.io/apimachinery/pkg/labels" |
| 18 | +) |
| 19 | + |
| 20 | +const ( |
| 21 | + labelNodeRoleMaster = "node-role.kubernetes.io/master" |
| 22 | + labelNodeRoleArbiter = "node-role.kubernetes.io/arbiter" |
| 23 | +) |
| 24 | + |
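| | +// defaultExpectedPodCount is the fallback number of running pods expected on the
| | +// arbiter node; expectedPodCountsPerPlatform overrides it per platform type.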
| 25 | +var ( |
| 26 | + defaultExpectedPodCount = 17 |
| 27 | + expectedPodCountsPerPlatform = map[v1.PlatformType]int{ |
| 28 | + v1.BareMetalPlatformType: 17, |
| 29 | + // Add more platforms as needed |
| 30 | + } |
| 31 | +) |
| 32 | + |
| 33 | +var _ = g.Describe("[sig-node][apigroup:config.openshift.io] expected Master and Arbiter node counts", func() { |
| 34 | + defer g.GinkgoRecover() |
| 35 | + oc := exutil.NewCLIWithoutNamespace("") |
| 36 | + |
| 37 | + g.BeforeEach(func() { |
| 38 | + infraStatus := getInfraStatus(oc) |
| 39 | + if infraStatus.ControlPlaneTopology != v1.HighlyAvailableArbiterMode { |
| 40 | + g.Skip("Cluster is not in HighlyAvailableArbiterMode, skipping test")
| 41 | + } |
| 42 | + }) |
| 43 | + |
| 44 | + g.It("Should validate that there are Master and Arbiter nodes as specified in the cluster", func() { |
| 45 | + g.By("Counting nodes dynamically based on labels") |
| 46 | + // TODO: instead of manually comparing against 2, get the expected number from the install config
| 47 | + // and compare it with the MCP node count (YAML comparison)
| 48 | + const ( |
| 49 | + expectedMasterNodes = 2 |
| 50 | + expectedArbiterNodes = 1 |
| 51 | + ) |
| 52 | + masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ |
| 53 | + LabelSelector: labelNodeRoleMaster, |
| 54 | + }) |
| 55 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Master nodes without error") |
| 56 | + o.Expect(len(masterNodes.Items)).To(o.Equal(expectedMasterNodes)) |
| 57 | + |
| 58 | + arbiterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ |
| 59 | + LabelSelector: labelNodeRoleArbiter, |
| 60 | + }) |
| 61 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Arbiter nodes without error") |
| 62 | + o.Expect(len(arbiterNodes.Items)).To(o.Equal(expectedArbiterNodes)) |
| 63 | + }) |
| 64 | +}) |
| 65 | + |
| 66 | +var _ = g.Describe("[sig-node][apigroup:config.openshift.io] required pods on the Arbiter node", func() { |
| 67 | + defer g.GinkgoRecover() |
| 68 | + |
| 69 | + var ( |
| 70 | + oc = exutil.NewCLIWithoutNamespace("") |
| 71 | + infraStatus v1.InfrastructureStatus |
| 72 | + ) |
| 73 | + |
| 74 | + g.BeforeEach(func() { |
| 75 | + infraStatus = getInfraStatus(oc) |
| 76 | + if infraStatus.ControlPlaneTopology != v1.HighlyAvailableArbiterMode { |
| 77 | + g.Skip("Cluster is not in HighlyAvailableArbiterMode, skipping test")
| 78 | + } |
| 79 | + }) |
| 80 | + g.It("Should verify that the correct number of pods are running on the Arbiter node", func() { |
| 81 | + g.By("Inferring platform type")
| 82 | + |
| 83 | + // Default to the bare-metal count of 17 expected pods if the platform type is not in the map;
| | + // a local copy avoids mutating the package-level default between tests
| 84 | + expectedPodCount := defaultExpectedPodCount
| | + if platformCount, exists := expectedPodCountsPerPlatform[infraStatus.PlatformStatus.Type]; exists {
| 85 | + expectedPodCount = platformCount
| 86 | + }
| 87 | + g.By("Retrieving the Arbiter node name") |
| 88 | + nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ |
| 89 | + LabelSelector: labelNodeRoleArbiter, |
| 90 | + }) |
| 91 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve nodes without error") |
| 92 | + o.Expect(len(nodes.Items)).To(o.Equal(1)) |
| 93 | + g.By("Comparing pod counts")
| 94 | + podCount := 0 |
| 95 | + for _, node := range nodes.Items { |
| 96 | + pods, err := oc.AdminKubeClient().CoreV1().Pods("").List(context.Background(), metav1.ListOptions{ |
| 97 | + FieldSelector: "spec.nodeName=" + node.Name + ",status.phase=Running", |
| 98 | + }) |
| 99 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve pods without error") |
| 100 | + podCount += len(pods.Items)
| 101 | + } |
| 102 | + o.Expect(podCount).To(o.Equal(expectedPodCount), "Expected the correct number of running pods on the Arbiter node")
| 103 | + }) |
| 104 | +}) |
| 105 | + |
| 106 | +var _ = g.Describe("[sig-apps][apigroup:apps.openshift.io] Deployments on HighlyAvailableArbiterMode topology", func() { |
| 107 | + defer g.GinkgoRecover() |
| 108 | + |
| 109 | + oc := exutil.NewCLI("arbiter-pod-validation").SetManagedNamespace().AsAdmin() |
| 110 | + g.BeforeEach(func() { |
| 111 | + skipNonArbiterCluster(oc) |
| 112 | + }) |
| 113 | + |
| 114 | + g.It("should be created on arbiter nodes when arbiter node is selected", func() { |
| 115 | + g.By("Waiting for Arbiter node to become Ready") |
| 116 | + var arbiterNodeName string |
| 117 | + timeout := time.Now().Add(2 * time.Minute) // Maximum wait time of 2 minutes |
| 118 | + |
| 119 | + for time.Now().Before(timeout) { |
| 120 | + arbiterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ |
| 121 | + LabelSelector: labelNodeRoleArbiter, |
| 122 | + }) |
| 123 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Arbiter nodes without error") |
| 124 | + |
| 125 | + if len(arbiterNodes.Items) != 1 { |
| 126 | + time.Sleep(5 * time.Second) |
| 127 | + continue |
| 128 | + } |
| 129 | + if isNodeReady(arbiterNodes.Items[0]) { |
| 130 | + arbiterNodeName = arbiterNodes.Items[0].Name |
| 131 | + break |
| 132 | + } |
| 133 | + |
| 134 | + time.Sleep(5 * time.Second) |
| 135 | + } |
| 136 | + |
| 137 | + o.Expect(arbiterNodeName).NotTo(o.BeEmpty(), "Timed out waiting for the Arbiter node to become Ready") |
| 138 | + |
| 139 | + g.By("Creating an Arbiter deployment (on Arbiter node)") |
| 140 | + _, err := createArbiterDeployment(oc, arbiterNodeName) |
| 141 | + o.Expect(err).To(o.BeNil(), "Expected Arbiter busybox deployment creation to succeed") |
| 142 | + |
| 143 | + g.By("Validating Arbiter deployment") |
| 144 | + arbiterSelector, err := labels.Parse("app=busybox-arbiter") |
| 145 | + o.Expect(err).To(o.BeNil(), "Expected to parse Arbiter label selector without error") |
| 146 | + |
| 147 | + arbiterPods, err := exutil.WaitForPods(oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()), arbiterSelector, isPodRunning, 1, time.Second*30) |
| 148 | + o.Expect(err).To(o.BeNil(), "Expected Arbiter pods to be running") |
| 149 | + o.Expect(len(arbiterPods)).To(o.Equal(1), "Expected exactly one Arbiter pod to be running on Arbiter node") |
| 150 | + |
| 151 | + arbiterPod, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), arbiterPods[0], metav1.GetOptions{}) |
| 152 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Arbiter pod without error") |
| 153 | + o.Expect(arbiterPod.Spec.NodeName).To(o.Equal(arbiterNodeName), "Expected Arbiter deployment to run on Arbiter node") |
| 154 | + }) |
| 155 | + |
| 156 | + g.It("should be created on master nodes when no node selected", func() { |
| 157 | + g.By("Retrieving Master nodes") |
| 158 | + masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ |
| 159 | + LabelSelector: labelNodeRoleMaster, |
| 160 | + }) |
| 161 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Master nodes without error") |
| 162 | + o.Expect(len(masterNodes.Items)).To(o.Equal(2), "Expected to find two Master nodes") |
| 163 | + |
| 164 | + // Create a map for Master nodes |
| 165 | + masterNodeMap := make(map[string]struct{}) |
| 166 | + for _, node := range masterNodes.Items { |
| 167 | + masterNodeMap[node.Name] = struct{}{} |
| 168 | + } |
| 169 | + |
| 170 | + g.By("Creating a Normal deployment (on Master nodes)") |
| 171 | + _, err = createNormalDeployment(oc) |
| 172 | + o.Expect(err).To(o.BeNil(), "Expected Master busybox deployment creation to succeed") |
| 173 | + |
| 174 | + g.By("Validating Normal deployment on Master nodes") |
| 175 | + normalSelector, err := labels.Parse("app=busybox-master") |
| 176 | + o.Expect(err).To(o.BeNil(), "Expected to parse Master label selector without error") |
| 177 | + |
| 178 | + normalPods, err := exutil.WaitForPods(oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()), normalSelector, isPodRunning, 1, time.Second*30) |
| 179 | + o.Expect(err).To(o.BeNil(), "Expected Normal pods to be running on Master nodes") |
| 180 | + o.Expect(len(normalPods)).To(o.Equal(1), "Expected exactly one Normal pod to be running on a Master node") |
| 181 | + |
| 182 | + pod, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), normalPods[0], metav1.GetOptions{}) |
| 183 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Normal pod without error") |
| 184 | + |
| 185 | + _, exists := masterNodeMap[pod.Spec.NodeName] |
| 186 | + o.Expect(exists).To(o.BeTrue(), "Expected pod to be running on a Master node") |
| 187 | + }) |
| 188 | +}) |
| 189 | + |
| 190 | +var _ = g.Describe("[sig-apps][apigroup:apps.openshift.io] Evaluate DaemonSet placement in HighlyAvailableArbiterMode topology", func() { |
| 191 | + defer g.GinkgoRecover() |
| 192 | + oc := exutil.NewCLI("daemonset-pod-validation").SetManagedNamespace().AsAdmin() |
| 193 | + |
| 194 | + g.BeforeEach(func() { |
| 195 | + skipNonArbiterCluster(oc) |
| 196 | + }) |
| 197 | + |
| 198 | + g.It("should not create a DaemonSet on the Arbiter node", func() { |
| 199 | + g.By("Retrieving the Arbiter node") |
| 200 | + arbiterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ |
| 201 | + LabelSelector: labelNodeRoleArbiter, |
| 202 | + }) |
| 203 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve Arbiter node without error") |
| 204 | + o.Expect(len(arbiterNodes.Items)).To(o.BeNumerically(">", 0), "Expected at least one Arbiter node") |
| 205 | + |
| 206 | + arbiterNodeName := arbiterNodes.Items[0].Name |
| 207 | + |
| 208 | + g.By("Creating a DaemonSet deployment") |
| 209 | + _, err = createDaemonSetDeployment(oc) |
| 210 | + o.Expect(err).To(o.BeNil(), "Expected DaemonSet deployment creation to succeed") |
| 211 | + |
| 212 | + g.By("Waiting for DaemonSet pods to reach Running state") |
| 213 | + daemonSetSelector, err := labels.Parse("app=busybox-daemon") |
| 214 | + o.Expect(err).To(o.BeNil(), "Expected to parse DaemonSet label selector without error") |
| 215 | + |
| 216 | + daemonSetPods, err := exutil.WaitForPods(oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()), daemonSetSelector, isPodRunning, 1, time.Second*30) |
| 217 | + o.Expect(err).To(o.BeNil(), "Expected DaemonSet pods to be running") |
| 218 | + o.Expect(len(daemonSetPods)).To(o.Equal(1), "Expected exactly one DaemonSet pod to be running") |
| 219 | + |
| 220 | + g.By("Validating that DaemonSet pods are NOT scheduled on the Arbiter node") |
| 221 | + for _, podName := range daemonSetPods { |
| 222 | + pod, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), podName, metav1.GetOptions{}) |
| 223 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve DaemonSet pod without error") |
| 224 | + |
| 225 | + o.Expect(pod.Spec.NodeName).NotTo(o.Equal(arbiterNodeName), |
| 226 | + fmt.Sprintf("DaemonSet pod (%s/%s) should NOT be scheduled on the Arbiter node", pod.Namespace, pod.Name)) |
| 227 | + } |
| 228 | + }) |
| 229 | +}) |
| 230 | + |
| 231 | +var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io] Ensure etcd health and quorum in HighlyAvailableArbiterMode", func() { |
| 232 | + defer g.GinkgoRecover() |
| 233 | + oc := exutil.NewCLIWithoutNamespace("").AsAdmin() |
| 234 | + |
| 235 | + g.BeforeEach(func() { |
| 236 | + skipNonArbiterCluster(oc) |
| 237 | + }) |
| 238 | + |
| 239 | + g.It("should have all etcd pods running and quorum met", func() { |
| 240 | + g.By("Retrieving and validating etcd pods") |
| 241 | + |
| 242 | + const ( |
| 243 | + namespace = "openshift-etcd" |
| 244 | + labelSelector = "app=etcd" |
| 245 | + expectedPods = 3 |
| 246 | + ) |
| 247 | + |
| 248 | + etcdPods, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ |
| 249 | + LabelSelector: labelSelector, |
| 250 | + }) |
| 251 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve etcd pods without error") |
| 252 | + o.Expect(len(etcdPods.Items)).To(o.Equal(expectedPods), "Expected exactly %d etcd pods in the 2-node + 1 arbiter cluster", expectedPods) |
| 253 | + |
| 254 | + // Ensure each etcd pod is running |
| 255 | + for _, pod := range etcdPods.Items { |
| 256 | + o.Expect(pod.Status.Phase).To(o.Equal(corev1.PodRunning),
| 257 | + fmt.Sprintf("Expected etcd pod %s to be in Running state", pod.Name))
| 258 | + } |
| 259 | + |
| 260 | + g.By("Checking etcd ClusterOperator Status") |
| 261 | + etcdOperator, err := oc.AdminConfigClient().ConfigV1().ClusterOperators().Get(context.Background(), "etcd", metav1.GetOptions{}) |
| 262 | + o.Expect(err).To(o.BeNil(), "Expected to retrieve etcd ClusterOperator without error") |
| 263 | + |
| 264 | + g.By("Verifying ClusterOperator conditions for Availability and Degradation") |
| 265 | + o.Expect(isClusterOperatorAvailable(etcdOperator)).To(o.BeTrue(), "Expected etcd operator to be available, indicating quorum is met") |
| 266 | + o.Expect(isClusterOperatorDegraded(etcdOperator)).To(o.BeFalse(), "Expected etcd operator not to be degraded") |
| 267 | + }) |
| 268 | +}) |
| 269 | + |
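| | +// createNormalDeployment creates a single-replica busybox Deployment with no node
| | +// selector, leaving the scheduler free to place the pod on a master node.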
| 270 | +func createNormalDeployment(oc *exutil.CLI) (*appv1.Deployment, error) { |
| 271 | + var replicas int32 = 1 |
| 272 | + |
| 273 | + container := corev1.Container{ |
| 274 | + Name: "busybox", |
| 275 | + Image: "busybox", |
| 276 | + Command: []string{"sleep", "3600"}, |
| 277 | + Resources: corev1.ResourceRequirements{ |
| 278 | + Requests: corev1.ResourceList{ |
| 279 | + corev1.ResourceCPU: resource.MustParse("20m"), |
| 280 | + corev1.ResourceMemory: resource.MustParse("50Mi"), |
| 281 | + }, |
| 282 | + }, |
| 283 | + } |
| 284 | + |
| 285 | + deployment := &appv1.Deployment{ |
| 286 | + TypeMeta: metav1.TypeMeta{ |
| 287 | + APIVersion: "apps/v1", |
| 288 | + Kind: "Deployment", |
| 289 | + }, |
| 290 | + ObjectMeta: metav1.ObjectMeta{ |
| 291 | + Name: "busybox-deployment-masters", |
| 292 | + Namespace: oc.Namespace(), |
| 293 | + }, |
| 294 | + Spec: appv1.DeploymentSpec{ |
| 295 | + Replicas: &replicas, |
| 296 | + Selector: &metav1.LabelSelector{ |
| 297 | + MatchLabels: map[string]string{"app": "busybox-master"}, |
| 298 | + }, |
| 299 | + Template: corev1.PodTemplateSpec{ |
| 300 | + ObjectMeta: metav1.ObjectMeta{ |
| 301 | + Labels: map[string]string{"app": "busybox-master"}, |
| 302 | + }, |
| 303 | + Spec: corev1.PodSpec{ |
| 304 | + Containers: []corev1.Container{container}, |
| 305 | + }, |
| 306 | + }, |
| 307 | + }, |
| 308 | + } |
| 309 | + |
| 310 | + return oc.KubeClient().AppsV1(). |
| 311 | + Deployments(oc.Namespace()). |
| 312 | + Create(context.Background(), deployment, metav1.CreateOptions{}) |
| 313 | +} |
| 314 | + |
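| | +// createArbiterDeployment creates a single-replica busybox Deployment pinned to the
| | +// given arbiter node via spec.nodeName.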
| 315 | +func createArbiterDeployment(oc *exutil.CLI, arbiterNodeName string) (*appv1.Deployment, error) { |
| 316 | + var replicas int32 = 1 |
| 317 | + |
| 318 | + container := corev1.Container{ |
| 319 | + Name: "busybox", |
| 320 | + Image: "busybox", |
| 321 | + Command: []string{"sleep", "3600"}, |
| 322 | + Resources: corev1.ResourceRequirements{ |
| 323 | + Requests: corev1.ResourceList{ |
| 324 | + corev1.ResourceCPU: resource.MustParse("20m"), |
| 325 | + corev1.ResourceMemory: resource.MustParse("50Mi"), |
| 326 | + }, |
| 327 | + }, |
| 328 | + } |
| 329 | + |
| 330 | + deployment := &appv1.Deployment{ |
| 331 | + TypeMeta: metav1.TypeMeta{ |
| 332 | + APIVersion: "apps/v1", |
| 333 | + Kind: "Deployment", |
| 334 | + }, |
| 335 | + ObjectMeta: metav1.ObjectMeta{ |
| 336 | + Name: "busybox-deployment-arbiter", |
| 337 | + Namespace: oc.Namespace(), |
| 338 | + }, |
| 339 | + Spec: appv1.DeploymentSpec{ |
| 340 | + Replicas: &replicas, |
| 341 | + Selector: &metav1.LabelSelector{ |
| 342 | + MatchLabels: map[string]string{"app": "busybox-arbiter"}, |
| 343 | + }, |
| 344 | + Template: corev1.PodTemplateSpec{ |
| 345 | + ObjectMeta: metav1.ObjectMeta{ |
| 346 | + Labels: map[string]string{"app": "busybox-arbiter"}, |
| 347 | + }, |
| 348 | + Spec: corev1.PodSpec{ |
| 349 | + NodeName: arbiterNodeName, |
| 350 | + Containers: []corev1.Container{container}, |
| 351 | + }, |
| 352 | + }, |
| 353 | + }, |
| 354 | + } |
| 355 | + |
| 356 | + return oc.KubeClient().AppsV1(). |
| 357 | + Deployments(oc.Namespace()). |
| 358 | + Create(context.Background(), deployment, metav1.CreateOptions{}) |
| 359 | +} |
| 360 | + |
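| | +// createDaemonSetDeployment creates a busybox DaemonSet in the test namespace with
| | +// default scheduling constraints.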
| 361 | +func createDaemonSetDeployment(oc *exutil.CLI) (*appv1.DaemonSet, error) { |
| 362 | + container := corev1.Container{ |
| 363 | + Name: "busybox", |
| 364 | + Image: "busybox", |
| 365 | + Command: []string{"sleep", "3600"}, |
| 366 | + Resources: corev1.ResourceRequirements{ |
| 367 | + Requests: corev1.ResourceList{ |
| 368 | + corev1.ResourceCPU: resource.MustParse("20m"), |
| 369 | + corev1.ResourceMemory: resource.MustParse("50Mi"), |
| 370 | + }, |
| 371 | + }, |
| 372 | + } |
| 373 | + |
| 374 | + daemonSet := &appv1.DaemonSet{ |
| 375 | + TypeMeta: metav1.TypeMeta{ |
| 376 | + APIVersion: "apps/v1", |
| 377 | + Kind: "DaemonSet", |
| 378 | + }, |
| 379 | + ObjectMeta: metav1.ObjectMeta{ |
| 380 | + Name: "busybox-daemon", |
| 381 | + Namespace: oc.Namespace(), |
| 382 | + }, |
| 383 | + Spec: appv1.DaemonSetSpec{ |
| 384 | + Selector: &metav1.LabelSelector{ |
| 385 | + MatchLabels: map[string]string{"app": "busybox-daemon"}, |
| 386 | + }, |
| 387 | + Template: corev1.PodTemplateSpec{ |
| 388 | + ObjectMeta: metav1.ObjectMeta{ |
| 389 | + Labels: map[string]string{"app": "busybox-daemon"}, |
| 390 | + }, |
| 391 | + Spec: corev1.PodSpec{ |
| 392 | + Containers: []corev1.Container{container}, |
| 393 | + }, |
| 394 | + }, |
| 395 | + }, |
| 396 | + } |
| 397 | + |
| 398 | + return oc.KubeClient().AppsV1(). |
| 399 | + DaemonSets(oc.Namespace()). |
| 400 | + Create(context.Background(), daemonSet, metav1.CreateOptions{}) |
| 401 | +} |
| 402 | + |
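| | +// isNodeReady reports whether the node has a NodeReady condition set to True.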
| 403 | +func isNodeReady(node corev1.Node) bool { |
| 404 | + for _, condition := range node.Status.Conditions { |
| 405 | + if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue { |
| 406 | + return true |
| 407 | + } |
| 408 | + } |
| 409 | + return false |
| 410 | +} |
| 411 | + |
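| | +// isPodRunning reports whether the pod is in the Running phase.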
| 412 | +func isPodRunning(pod corev1.Pod) bool { |
| 413 | + return pod.Status.Phase == corev1.PodRunning |
| 414 | +} |
| 415 | + |
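| | +// isClusterOperatorAvailable reports whether the ClusterOperator has an Available condition set to True.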
| 416 | +func isClusterOperatorAvailable(operator *v1.ClusterOperator) bool { |
| 417 | + for _, cond := range operator.Status.Conditions { |
| 418 | + if cond.Type == v1.OperatorAvailable && cond.Status == v1.ConditionTrue { |
| 419 | + return true |
| 420 | + } |
| 421 | + } |
| 422 | + return false |
| 423 | +} |
| 424 | + |
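| | +// isClusterOperatorDegraded reports whether the ClusterOperator has a Degraded condition set to True.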
| 425 | +func isClusterOperatorDegraded(operator *v1.ClusterOperator) bool { |
| 426 | + for _, cond := range operator.Status.Conditions { |
| 427 | + if cond.Type == v1.OperatorDegraded && cond.Status == v1.ConditionTrue { |
| 428 | + return true |
| 429 | + } |
| 430 | + } |
| 431 | + return false |
| 432 | +} |
| 433 | + |
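| | +// skipNonArbiterCluster skips the current test unless the cluster control plane
| | +// topology is HighlyAvailableArbiterMode.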
| 434 | +func skipNonArbiterCluster(oc *exutil.CLI) { |
| 435 | + infraStatus := getInfraStatus(oc) |
| 436 | + if infraStatus.ControlPlaneTopology != v1.HighlyAvailableArbiterMode { |
| 437 | + g.Skip("Cluster is not in HighlyAvailableArbiterMode, skipping test") |
| 438 | + } |
| 439 | +} |
| 440 | + |
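| | +// getInfraStatus returns the status of the cluster Infrastructure resource,
| | +// failing the test if it cannot be retrieved.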
| 441 | +func getInfraStatus(oc *exutil.CLI) v1.InfrastructureStatus { |
| 442 | + infra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(context.Background(), |
| 443 | + "cluster", metav1.GetOptions{}) |
| 444 | + o.Expect(err).NotTo(o.HaveOccurred()) |
| 445 | + return infra.Status |
| 446 | +} |