Skip to content

Commit da7e5a3

Browse files
committed
OADP-5782: Add hypershift-oadp-plugin E2E tests
Signed-off-by: Juan Manuel Parrilla Madrid <[email protected]>
1 parent 5cc21ce commit da7e5a3

25 files changed

+3711
-25
lines changed

Makefile

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -559,6 +559,7 @@ VELERO_INSTANCE_NAME ?= velero-test
559559
ARTIFACT_DIR ?= /tmp
560560
HCO_UPSTREAM ?= false
561561
TEST_VIRT ?= false
562+
TEST_HCP ?= false
562563
TEST_UPGRADE ?= false
563564
TEST_FILTER = (($(shell echo '! aws && ! gcp && ! azure && ! ibmcloud' | \
564565
$(SED) -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//")) || $(CLUSTER_TYPE))
@@ -573,6 +574,11 @@ ifeq ($(TEST_UPGRADE),true)
573574
else
574575
TEST_FILTER += && (! upgrade)
575576
endif
577+
ifeq ($(TEST_HCP),true)
578+
TEST_FILTER += && (hcp)
579+
else
580+
TEST_FILTER += && (! hcp)
581+
endif
576582

577583
.PHONY: test-e2e
578584
test-e2e: test-e2e-setup install-ginkgo ## Run E2E tests against OADP operator installed in cluster. For more information, check docs/developer/testing/TESTING.md

bundle/manifests/oadp-operator.clusterserviceversion.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,8 @@ metadata:
4040
"defaultPlugins": [
4141
"openshift",
4242
"aws",
43-
"kubevirt"
43+
"kubevirt",
44+
"hypershift"
4445
],
4546
"disableFsBackup": false
4647
}

config/samples/oadp_v1alpha1_dataprotectionapplication.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ spec:
1010
- openshift
1111
- aws
1212
- kubevirt
13+
- hypershift
1314
nodeAgent:
1415
enable: true
1516
uploaderType: kopia

go.mod

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ require (
1212
github.com/onsi/ginkgo/v2 v2.19.0
1313
github.com/onsi/gomega v1.33.1
1414
github.com/openshift/api v0.0.0-20240524162738-d899f8877d22 // release-4.12
15+
github.com/openshift/hypershift/api v0.0.0-20241128081537-8326d865eaf5
1516
github.com/operator-framework/api v0.10.7
1617
github.com/operator-framework/operator-lib v0.9.0
1718
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.51.2

go.sum

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -615,6 +615,8 @@ github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
615615
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
616616
github.com/openshift/api v0.0.0-20240524162738-d899f8877d22 h1:AW8KUN4k7qR2egrCCe3x95URHQ3N188+a/b0qpRyAHg=
617617
github.com/openshift/api v0.0.0-20240524162738-d899f8877d22/go.mod h1:7Hm1kLJGxWT6eysOpD2zUztdn+w91eiERn6KtI5o9aw=
618+
github.com/openshift/hypershift/api v0.0.0-20241128081537-8326d865eaf5 h1:z8AkPjlJ/CPqED/EPtlgQKYEt8+Edc30ZR8eQWOEigA=
619+
github.com/openshift/hypershift/api v0.0.0-20241128081537-8326d865eaf5/go.mod h1:3UlUlywmXBCEMF3GACTvMAOvv2lU5qzUDvTYFXeGbKU=
618620
github.com/openshift/velero v0.10.2-0.20250313160323-584cf1148a74 h1:ZHO0O6g1Enel2O4rAk7VfWLHlQKYkOcdWGAmoiZ3fQw=
619621
github.com/openshift/velero v0.10.2-0.20250313160323-584cf1148a74/go.mod h1:sASoDB9pLWqvIi1nD1ZFOpmj5JB+p10lHVm+f+Hp1oU=
620622
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=

tests/e2e/e2e_suite_test.go

Lines changed: 6 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -8,26 +8,17 @@ import (
88
"testing"
99
"time"
1010

11-
volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
1211
"github.com/onsi/ginkgo/v2"
1312
"github.com/onsi/gomega"
14-
openshiftappsv1 "github.com/openshift/api/apps/v1"
15-
openshiftbuildv1 "github.com/openshift/api/build/v1"
16-
openshiftroutev1 "github.com/openshift/api/route/v1"
17-
openshiftsecurityv1 "github.com/openshift/api/security/v1"
18-
openshifttemplatev1 "github.com/openshift/api/template/v1"
19-
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
20-
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
21-
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
22-
corev1 "k8s.io/api/core/v1"
2313
"k8s.io/apimachinery/pkg/util/uuid"
2414
"k8s.io/client-go/dynamic"
2515
"k8s.io/client-go/kubernetes"
2616
"k8s.io/client-go/rest"
17+
ctrl "sigs.k8s.io/controller-runtime"
2718
"sigs.k8s.io/controller-runtime/pkg/client"
2819
"sigs.k8s.io/controller-runtime/pkg/client/config"
20+
"sigs.k8s.io/controller-runtime/pkg/log/zap"
2921

30-
oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1"
3122
"github.com/openshift/oadp-operator/tests/e2e/lib"
3223
)
3324

@@ -136,21 +127,9 @@ func TestOADPE2E(t *testing.T) {
136127
kubernetesClientForSuiteRun, err = kubernetes.NewForConfig(kubeConfig)
137128
gomega.Expect(err).NotTo(gomega.HaveOccurred())
138129

139-
runTimeClientForSuiteRun, err = client.New(kubeConfig, client.Options{})
130+
runTimeClientForSuiteRun, err = client.New(kubeConfig, client.Options{Scheme: lib.Scheme})
140131
gomega.Expect(err).NotTo(gomega.HaveOccurred())
141132

142-
oadpv1alpha1.AddToScheme(runTimeClientForSuiteRun.Scheme())
143-
velerov1.AddToScheme(runTimeClientForSuiteRun.Scheme())
144-
openshiftappsv1.AddToScheme(runTimeClientForSuiteRun.Scheme())
145-
openshiftbuildv1.AddToScheme(runTimeClientForSuiteRun.Scheme())
146-
openshiftsecurityv1.AddToScheme(runTimeClientForSuiteRun.Scheme())
147-
openshifttemplatev1.AddToScheme(runTimeClientForSuiteRun.Scheme())
148-
openshiftroutev1.AddToScheme(runTimeClientForSuiteRun.Scheme())
149-
corev1.AddToScheme(runTimeClientForSuiteRun.Scheme())
150-
volumesnapshotv1.AddToScheme(runTimeClientForSuiteRun.Scheme())
151-
operatorsv1alpha1.AddToScheme(runTimeClientForSuiteRun.Scheme())
152-
operatorsv1.AddToScheme(runTimeClientForSuiteRun.Scheme())
153-
154133
dynamicClientForSuiteRun, err = dynamic.NewForConfig(kubeConfig)
155134
gomega.Expect(err).NotTo(gomega.HaveOccurred())
156135

@@ -186,6 +165,9 @@ func TestOADPE2E(t *testing.T) {
186165
}
187166

188167
var _ = ginkgo.BeforeSuite(func() {
168+
// Initialize controller-runtime logger
169+
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
170+
189171
// TODO create logger (hh:mm:ss message) to be used by all functions
190172
log.Printf("Creating Secrets")
191173
bslCredFileData, err := lib.ReadFile(bslCredFile)
Lines changed: 236 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,236 @@
1+
package e2e_test
2+
3+
import (
4+
"context"
5+
"fmt"
6+
"log"
7+
"time"
8+
9+
"github.com/onsi/ginkgo/v2"
10+
"github.com/onsi/gomega"
11+
12+
"github.com/openshift/oadp-operator/tests/e2e/lib"
13+
libhcp "github.com/openshift/oadp-operator/tests/e2e/lib/hcp"
14+
)
15+
16+
// HCPBackupRestoreCase extends BackupRestoreCase with the extra inputs needed
// to deploy a HostedCluster (HCP) for a Hypershift backup/restore E2E test.
type HCPBackupRestoreCase struct {
	BackupRestoreCase

	// Template is the HostedCluster manifest template deployed for the test
	// (e.g. libhcp.HCPNoneManifest, libhcp.HCPAgentManifest).
	Template string

	// Provider is the HostedCluster platform type passed to the manifest
	// deployment (e.g. "None", "Agent").
	Provider string
}
21+
22+
func runHCPBackupAndRestore(brCase HCPBackupRestoreCase, updateLastBRcase func(brCase HCPBackupRestoreCase), h *libhcp.HCHandler) {
23+
updateLastBRcase(brCase)
24+
25+
log.Printf("Preparing backup and restore")
26+
backupName, restoreName := prepareBackupAndRestore(brCase.BackupRestoreCase, func() {})
27+
28+
err := h.AddHCPPluginToDPA(dpaCR.Namespace, dpaCR.Name, false)
29+
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to add HCP plugin to DPA: %v", err)
30+
// TODO: move the wait for HC just after the DPA modification to allow reconciliation to go ahead without waiting for the HC to be created
31+
32+
//Wait for HCP plugin to be added
33+
gomega.Eventually(libhcp.IsHCPPluginAdded(h.Client, dpaCR.Namespace, dpaCR.Name), 3*time.Minute, 1*time.Second).Should(gomega.BeTrue())
34+
35+
// Create the HostedCluster for the test
36+
h.HCPNamespace = libhcp.GetHCPNamespace(brCase.BackupRestoreCase.Name, libhcp.ClustersNamespace)
37+
h.HostedCluster, err = h.DeployHCManifest(brCase.Template, brCase.Provider, brCase.BackupRestoreCase.Name)
38+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
39+
40+
if brCase.PreBackupVerify != nil {
41+
err := brCase.PreBackupVerify(runTimeClientForSuiteRun, brCase.Namespace)
42+
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP pre-backup verification: %v", err)
43+
}
44+
45+
// Backup HCP & HC
46+
log.Printf("Backing up HC")
47+
includedResources := libhcp.HCPIncludedResources
48+
excludedResources := libhcp.HCPExcludedResources
49+
includedNamespaces := append(libhcp.HCPIncludedNamespaces, libhcp.GetHCPNamespace(h.HostedCluster.Name, libhcp.ClustersNamespace))
50+
51+
nsRequiresResticDCWorkaround := runHCPBackup(brCase.BackupRestoreCase, backupName, h, includedNamespaces, includedResources, excludedResources)
52+
53+
// Delete everything in HCP namespace
54+
log.Printf("Deleting HCP & HC")
55+
err = h.RemoveHCP(libhcp.Wait10Min)
56+
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to remove HCP: %v", err)
57+
58+
// Restore HC
59+
log.Printf("Restoring HC")
60+
runHCPRestore(brCase.BackupRestoreCase, backupName, restoreName, nsRequiresResticDCWorkaround)
61+
62+
// Wait for HCP to be restored
63+
log.Printf("Validating HC")
64+
err = libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, h.HCPNamespace)(h.Client, libhcp.ClustersNamespace)
65+
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP post-restore verification: %v", err)
66+
}
67+
68+
var _ = ginkgo.Describe("HCP Backup and Restore tests", ginkgo.Ordered, func() {
69+
var (
70+
lastInstallTime time.Time
71+
lastBRCase HCPBackupRestoreCase
72+
h *libhcp.HCHandler
73+
err error
74+
ctx = context.Background()
75+
)
76+
77+
updateLastBRcase := func(brCase HCPBackupRestoreCase) {
78+
lastBRCase = brCase
79+
}
80+
81+
// Before All
82+
var _ = ginkgo.BeforeAll(func() {
83+
reqOperators := []libhcp.RequiredOperator{
84+
{
85+
Name: libhcp.MCEName,
86+
Namespace: libhcp.MCENamespace,
87+
OperatorGroup: libhcp.MCEOperatorGroup,
88+
},
89+
}
90+
91+
// Install MCE and Hypershift operators
92+
h, err = libhcp.InstallRequiredOperators(ctx, runTimeClientForSuiteRun, reqOperators)
93+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
94+
gomega.Expect(h).ToNot(gomega.BeNil())
95+
gomega.Eventually(lib.IsDeploymentReady(h.Client, libhcp.MCENamespace, libhcp.MCEOperatorName), libhcp.Wait10Min, time.Second*5).Should(gomega.BeTrue())
96+
97+
// Deploy the MCE manifest
98+
err = h.DeployMCEManifest()
99+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
100+
101+
// Deploy the MCE and wait for it to be ready
102+
gomega.Eventually(lib.IsDeploymentReady(h.Client, libhcp.MCENamespace, libhcp.MCEOperatorName), libhcp.Wait10Min, time.Second*5).Should(gomega.BeTrue())
103+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
104+
105+
// Validate the Hypershift operator
106+
gomega.Eventually(lib.IsDeploymentReady(h.Client, libhcp.HONamespace, libhcp.HypershiftOperatorName), libhcp.Wait10Min, time.Second*5).Should(gomega.BeTrue())
107+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
108+
})
109+
110+
// After All
111+
var _ = ginkgo.AfterAll(func() {
112+
err := h.RemoveHCP(libhcp.Wait10Min)
113+
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to remove HCP: %v", err)
114+
})
115+
116+
// After Each
117+
var _ = ginkgo.AfterEach(func(ctx ginkgo.SpecContext) {
118+
h.RemoveHCP(libhcp.Wait10Min)
119+
tearDownBackupAndRestore(lastBRCase.BackupRestoreCase, lastInstallTime, ctx.SpecReport())
120+
})
121+
122+
ginkgo.DescribeTable("Basic HCP backup and restore test",
123+
func(brCase HCPBackupRestoreCase, expectedErr error) {
124+
if ginkgo.CurrentSpecReport().NumAttempts > 1 && !knownFlake {
125+
ginkgo.Fail("No known FLAKE found in a previous run, marking test as failed.")
126+
}
127+
runHCPBackupAndRestore(brCase, updateLastBRcase, h)
128+
},
129+
130+
// Test Cases
131+
ginkgo.Entry("None HostedCluster backup and restore", ginkgo.Label("hcp"), HCPBackupRestoreCase{
132+
Template: libhcp.HCPNoneManifest,
133+
Provider: "None",
134+
BackupRestoreCase: BackupRestoreCase{
135+
Namespace: libhcp.GetHCPNamespace(fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace),
136+
Name: fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix),
137+
BackupRestoreType: lib.CSIDataMover,
138+
PreBackupVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)),
139+
PostRestoreVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)),
140+
BackupTimeout: libhcp.HCPBackupTimeout,
141+
},
142+
}, nil),
143+
144+
ginkgo.Entry("Agent HostedCluster backup and restore", ginkgo.Label("hcp"), HCPBackupRestoreCase{
145+
Template: libhcp.HCPAgentManifest,
146+
Provider: "Agent",
147+
BackupRestoreCase: BackupRestoreCase{
148+
Namespace: libhcp.GetHCPNamespace(fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace),
149+
Name: fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix),
150+
BackupRestoreType: lib.CSIDataMover,
151+
PreBackupVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)),
152+
PostRestoreVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)),
153+
BackupTimeout: libhcp.HCPBackupTimeout,
154+
},
155+
}, nil),
156+
)
157+
})
158+
159+
// TODO: Modify the runBackup function to inject the filtered error logs to avoid repeating code with this
160+
func runHCPBackup(brCase BackupRestoreCase, backupName string, h *libhcp.HCHandler, namespaces []string, includedResources, excludedResources []string) bool {
161+
nsRequiresResticDCWorkaround, err := lib.NamespaceRequiresResticDCWorkaround(h.Client, brCase.Namespace)
162+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
163+
164+
// create backup
165+
log.Printf("Creating backup %s for case %s", backupName, brCase.Name)
166+
err = lib.CreateCustomBackupForNamespaces(h.Client, namespace, backupName, namespaces, includedResources, excludedResources, brCase.BackupRestoreType == lib.RESTIC || brCase.BackupRestoreType == lib.KOPIA, brCase.BackupRestoreType == lib.CSIDataMover)
167+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
168+
169+
// wait for backup to not be running
170+
gomega.Eventually(lib.IsBackupDone(h.Client, namespace, backupName), brCase.BackupTimeout, time.Second*10).Should(gomega.BeTrue())
171+
// TODO only log on fail?
172+
describeBackup := lib.DescribeBackup(h.Client, namespace, backupName)
173+
ginkgo.GinkgoWriter.Println(describeBackup)
174+
175+
backupLogs := lib.BackupLogs(kubernetesClientForSuiteRun, h.Client, namespace, backupName)
176+
backupErrorLogs := lib.BackupErrorLogs(kubernetesClientForSuiteRun, h.Client, namespace, backupName)
177+
accumulatedTestLogs = append(accumulatedTestLogs, describeBackup, backupLogs)
178+
179+
// Check error logs for non-relevant errors
180+
filteredBackupErrorLogs := libhcp.FilterErrorLogs(backupErrorLogs)
181+
182+
if !brCase.SkipVerifyLogs {
183+
gomega.Expect(filteredBackupErrorLogs).Should(gomega.Equal([]string{}))
184+
}
185+
186+
// check if backup succeeded
187+
succeeded, err := lib.IsBackupCompletedSuccessfully(kubernetesClientForSuiteRun, h.Client, namespace, backupName)
188+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
189+
gomega.Expect(succeeded).To(gomega.Equal(true))
190+
log.Printf("Backup for case %s succeeded", brCase.Name)
191+
192+
if brCase.BackupRestoreType == lib.CSI {
193+
// wait for volume snapshot to be Ready
194+
gomega.Eventually(lib.AreVolumeSnapshotsReady(h.Client, backupName), time.Minute*4, time.Second*10).Should(gomega.BeTrue())
195+
}
196+
197+
return nsRequiresResticDCWorkaround
198+
}
199+
200+
// TODO: Modify the runRestore function to inject the filtered error logs to avoid repeating code with this
201+
func runHCPRestore(brCase BackupRestoreCase, backupName string, restoreName string, nsRequiresResticDCWorkaround bool) {
202+
log.Printf("Creating restore %s for case %s", restoreName, brCase.Name)
203+
err := lib.CreateRestoreFromBackup(dpaCR.Client, namespace, backupName, restoreName)
204+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
205+
gomega.Eventually(lib.IsRestoreDone(dpaCR.Client, namespace, restoreName), time.Minute*60, time.Second*10).Should(gomega.BeTrue())
206+
// TODO only log on fail?
207+
describeRestore := lib.DescribeRestore(dpaCR.Client, namespace, restoreName)
208+
ginkgo.GinkgoWriter.Println(describeRestore)
209+
210+
restoreLogs := lib.RestoreLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName)
211+
restoreErrorLogs := lib.RestoreErrorLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName)
212+
accumulatedTestLogs = append(accumulatedTestLogs, describeRestore, restoreLogs)
213+
214+
// Check error logs for non-relevant errors
215+
filteredRestoreErrorLogs := libhcp.FilterErrorLogs(restoreErrorLogs)
216+
217+
if !brCase.SkipVerifyLogs {
218+
gomega.Expect(filteredRestoreErrorLogs).Should(gomega.Equal([]string{}))
219+
}
220+
221+
// Check if restore succeeded
222+
succeeded, err := lib.IsRestoreCompletedSuccessfully(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName)
223+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
224+
gomega.Expect(succeeded).To(gomega.Equal(true))
225+
226+
if nsRequiresResticDCWorkaround {
227+
// We run the dc-post-restore.sh script for both restic and
228+
// kopia backups and for any DCs with attached volumes,
229+
// regardless of whether it was restic or kopia backup.
230+
// The script is designed to work with labels set by the
231+
// openshift-velero-plugin and can be run without pre-conditions.
232+
log.Printf("Running dc-post-restore.sh script.")
233+
err = lib.RunDcPostRestoreScript(restoreName)
234+
gomega.Expect(err).ToNot(gomega.HaveOccurred())
235+
}
236+
}

tests/e2e/lib/apps.go

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -285,6 +285,7 @@ func IsDeploymentReady(ocClient client.Client, namespace, dName string) wait.Con
285285
if err != nil {
286286
return false, err
287287
}
288+
log.Printf("Deployment %s status: %v", dName, deployment.Status)
288289
if deployment.Status.AvailableReplicas != deployment.Status.Replicas || deployment.Status.Replicas == 0 {
289290
for _, condition := range deployment.Status.Conditions {
290291
if len(condition.Message) > 0 {
@@ -297,6 +298,30 @@ func IsDeploymentReady(ocClient client.Client, namespace, dName string) wait.Con
297298
}
298299
}
299300

301+
// IsStatefulSetReady checks if a StatefulSet is ready
302+
func IsStatefulSetReady(ocClient client.Client, namespace, name string) wait.ConditionFunc {
303+
return func() (bool, error) {
304+
sts := &appsv1.StatefulSet{}
305+
err := ocClient.Get(context.Background(), client.ObjectKey{
306+
Namespace: namespace,
307+
Name: name,
308+
}, sts)
309+
if err != nil {
310+
return false, err
311+
}
312+
log.Printf("StatefulSet %s status: %v", name, sts.Status)
313+
if sts.Status.ReadyReplicas != sts.Status.Replicas || sts.Status.Replicas == 0 {
314+
for _, condition := range sts.Status.Conditions {
315+
if len(condition.Message) > 0 {
316+
ginkgo.GinkgoWriter.Write([]byte(fmt.Sprintf("statefulset not available with condition: %s\n", condition.Message)))
317+
}
318+
}
319+
return false, errors.New("statefulset is not in a ready state")
320+
}
321+
return true, nil
322+
}
323+
}
324+
300325
func AreApplicationPodsRunning(c *kubernetes.Clientset, namespace string) wait.ConditionFunc {
301326
return func() (bool, error) {
302327
podList, err := GetAllPodsWithLabel(c, namespace, e2eAppLabel)

0 commit comments

Comments
 (0)