
Commit dada8ea

Add blocking end-to-end tests for lifecycle hooks
Signed-off-by: killianmuldoon <[email protected]>
1 parent 7809ffb commit dada8ea
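
This commit extends the Runtime SDK end-to-end test so that blocking lifecycle hooks are verified to actually block. Responses for BeforeClusterCreate, BeforeClusterUpgrade and AfterControlPlaneUpgrade are preloaded into the Cluster's "-hookresponses" ConfigMap with Status:Failure; the test asserts that the corresponding step of cluster creation or upgrade stays blocked, patches the response to Status:Success, and then asserts that the step completes. The test extension now records the status it actually returned for each hook call, and the test framework gains optional RuntimeHookTestHandlers callbacks that run at the matching points of ApplyClusterTemplateAndWait and UpgradeClusterTopologyAndWaitForUpgrade.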

4 files changed (+158 −42)


test/e2e/cluster_upgrade_runtimesdk.go

Lines changed: 117 additions & 15 deletions
@@ -29,9 +29,12 @@ import (
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/pointer"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
@@ -113,7 +116,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 			workerMachineCount = *input.WorkerMachineCount
 		}
 
-		// Setup a Namespace where to host objects for this spec and create a watcher for the Namespace events.
+		// Set up a Namespace to host objects for this spec and create a watcher for the Namespace events.
 		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
 		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 	})
@@ -156,6 +159,12 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 				ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
 				WorkerMachineCount:       pointer.Int64Ptr(workerMachineCount),
 			},
+			RuntimeHookTestHandlers: framework.RuntimeHookTestHandlers{
+				BeforeClusterCreate: func() {
+					beforeClusterCreateTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetIntervals(specName, "wait-cluster"))
+				},
+			},
+
 			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
 			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
 			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
@@ -176,6 +185,14 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 			WaitForKubeProxyUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
 			WaitForDNSUpgrade:       input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
 			WaitForEtcdUpgrade:      input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
+			RuntimeHookTestHandlers: framework.RuntimeHookTestHandlers{
+				BeforeClusterUpgrade: func() {
+					beforeClusterUpgradeTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
+				},
+				AfterControlPlaneUpgrade: func() {
+					afterControlPlaneUpgradeTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
+				},
+			},
 		})
 
 		// Only attempt to upgrade MachinePools if they were provided in the template.
@@ -201,13 +218,13 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 		})
 
 		By("Checking all lifecycle hooks have been called")
-		// Assert that each hook passed to this function is marked as "true" in the response configmap
+		// Assert that each hook has been called and returned "Success" during the test.
 		err = checkLifecycleHooks(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
-			"BeforeClusterCreate":          "",
-			"BeforeClusterUpgrade":         "",
-			"AfterControlPlaneInitialized": "",
-			"AfterControlPlaneUpgrade":     "",
-			"AfterClusterUpgrade":          "",
+			"BeforeClusterCreate":          "Success",
+			"BeforeClusterUpgrade":         "Success",
+			"AfterControlPlaneInitialized": "Success",
+			"AfterControlPlaneUpgrade":     "Success",
+			"AfterClusterUpgrade":          "Success",
 		})
 		Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")

@@ -266,13 +283,16 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
 			Name:      fmt.Sprintf("%s-hookresponses", name),
 			Namespace: namespace.Name,
 		},
-		// Every response contain only Status:Success. The test checks whether each handler has been called at least once.
+		// Responses for the lifecycle hooks are preloaded here and patched by the test as it progresses.
 		Data: map[string]string{
-			"BeforeClusterCreate-response":          `{"Status": "Success"}`,
-			"BeforeClusterUpgrade-response":         `{"Status": "Success"}`,
-			"AfterControlPlaneInitialized-response": `{"Status": "Success"}`,
-			"AfterControlPlaneUpgrade-response":     `{"Status": "Success"}`,
-			"AfterClusterUpgrade-response":          `{"Status": "Success"}`,
+			// Blocking hooks are set to Status:Failure initially. These will be changed during the test.
+			"BeforeClusterCreate-preloadedResponse":      `{"Status": "Failure", "Message": "hook failed"}`,
+			"BeforeClusterUpgrade-preloadedResponse":     `{"Status": "Failure", "Message": "hook failed"}`,
+			"AfterControlPlaneUpgrade-preloadedResponse": `{"Status": "Failure", "Message": "hook failed"}`,
+
+			// Non-blocking hooks are set to Status:Success.
+			"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
+			"AfterClusterUpgrade-preloadedResponse":          `{"Status": "Success"}`,
 		},
 	}
 }
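
For reference, each preloaded response is a plain JSON document stored as a string value in the ConfigMap. A minimal sketch of how such a value round-trips, with a simplified hookResponse struct standing in for the runtimehooksv1 response types (the struct and program below are illustrative assumptions, not part of the commit):

package main

import (
	"encoding/json"
	"fmt"
)

// hookResponse is a simplified stand-in for the Runtime SDK response types.
type hookResponse struct {
	Status  string `json:"Status"`
	Message string `json:"Message,omitempty"`
}

func main() {
	// The value preloaded under "BeforeClusterCreate-preloadedResponse".
	preloaded := `{"Status": "Failure", "Message": "hook failed"}`

	var r hookResponse
	if err := json.Unmarshal([]byte(preloaded), &r); err != nil {
		panic(err)
	}
	// The extension returns this status to the hook caller, so the hook keeps
	// blocking until the test patches the entry to Status:Success.
	fmt.Printf("status=%s message=%q\n", r.Status, r.Message)
}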
@@ -282,10 +302,94 @@ func checkLifecycleHooks(ctx context.Context, c client.Client, namespace string,
 	configMapName := clusterName + "-hookresponses"
 	err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap)
 	Expect(err).ToNot(HaveOccurred(), "Failed to get the hook response configmap")
-	for hook := range hooks {
-		if _, ok := configMap.Data[hook+"-called"]; !ok {
+	for hook, expected := range hooks {
+		v, ok := configMap.Data[hook+"-actualResponseStatus"]
+		if !ok {
 			return errors.Errorf("hook %s call not recorded in configMap %s/%s", hook, namespace, configMapName)
 		}
+		if expected != "" && v != expected {
+			return errors.Errorf("hook %s was expected to show %q in configMap, got %q", hook, expected, v)
+		}
 	}
 	return nil
 }
+
+func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
+	runtimeHookTestHandler(ctx, c, namespace, clusterName, "BeforeClusterCreate", func() bool {
+		// This hook should block the Cluster from entering the "Provisioned" state.
+		cluster := &clusterv1.Cluster{}
+		Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
+		return cluster.Status.Phase == string(clusterv1.ClusterPhaseProvisioned)
+	}, intervals)
+}
+
+func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
+	runtimeHookTestHandler(ctx, c, namespace, clusterName, "BeforeClusterUpgrade", func() bool {
+		cluster := &clusterv1.Cluster{}
+		var unblocked bool
+
+		// First ensure the Cluster topology has been updated to the target Kubernetes version.
+		Eventually(func() bool {
+			Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
+			return cluster.Spec.Topology.Version == version
+		}).Should(BeTrue(), "BeforeClusterUpgrade blocking condition false: Cluster topology has not been updated to the target Kubernetes version")
+
+		// Check if the Cluster is showing the RollingUpdateInProgress condition reason. If so, the upgrade has started and the hook has unblocked.
+		for _, condition := range cluster.GetConditions() {
+			if condition.Type == clusterv1.ReadyCondition {
+				if condition.Status == corev1.ConditionFalse && condition.Reason == controlplanev1.RollingUpdateInProgressReason {
+					unblocked = true
+				}
+			}
+		}
+		return unblocked
+	}, intervals)
+}
+
+func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
+	runtimeHookTestHandler(ctx, c, namespace, clusterName, "AfterControlPlaneUpgrade", func() bool {
+		var unblocked bool
+		mds := &clusterv1.MachineDeploymentList{}
+		Expect(c.List(ctx, mds, client.MatchingLabels{
+			clusterv1.ClusterLabelName:          clusterName,
+			clusterv1.ClusterTopologyOwnedLabel: "",
+		})).To(Succeed())
+
+		// If any of the MachineDeployments have the target Kubernetes version, the hook is unblocked.
+		for _, md := range mds.Items {
+			if *md.Spec.Template.Spec.Version == version {
+				unblocked = true
+			}
+		}
+		return unblocked
+	}, intervals)
+}
+
+func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, blockingCondition func() bool, intervals []interface{}) {
+	// First check that the lifecycle hook has been called at least once.
+	Eventually(func() bool {
+		err := checkLifecycleHooks(ctx, c, namespace, clusterName, map[string]string{hookName: ""})
+		return err == nil
+	}, 10*time.Second).Should(BeTrue(), "%s has not been called", hookName)
+
+	// The blocking condition should consistently be false while the Runtime hook is returning "Failure".
+	Consistently(func() bool {
+		return blockingCondition()
+	}, 10*time.Second).Should(BeFalse(), fmt.Sprintf("%s hook blocking condition succeeded before unblocking", hookName))
+
+	// Patch the ConfigMap to set the hook response to "Success".
+	By(fmt.Sprintf("Setting %s response to Status:Success", hookName))
+
+	configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: clusterName + "-hookresponses", Namespace: namespace}}
+	Expect(c.Get(ctx, util.ObjectKey(configMap), configMap)).To(Succeed())
+	patch := client.RawPatch(types.MergePatchType,
+		[]byte(fmt.Sprintf(`{"data":{"%s-preloadedResponse":%s}}`, hookName, "\"{\\\"Status\\\": \\\"Success\\\"}\"")))
+	err := c.Patch(ctx, configMap, patch)
+	Expect(err).ToNot(HaveOccurred())
+
+	// Expect the hook to pass, i.e. the blocking condition to become true, before the timeout ends.
+	Eventually(func() bool {
+		return blockingCondition()
+	}, intervals...).Should(BeTrue(),
+		fmt.Sprintf("%s hook blocking condition did not succeed after unblocking", hookName))
+}
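
The subtlest line in runtimeHookTestHandler is the merge patch that flips a preloaded response to Success: the response document is itself a JSON string inside the ConfigMap data, so its quotes must be escaped inside the patch body. A minimal sketch of just that construction (successPatch is a hypothetical helper for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// successPatch builds the same merge-patch body the test sends with
// client.RawPatch(types.MergePatchType, ...).
func successPatch(hookName string) []byte {
	return []byte(fmt.Sprintf(`{"data":{"%s-preloadedResponse":%s}}`,
		hookName, "\"{\\\"Status\\\": \\\"Success\\\"}\""))
}

func main() {
	p := successPatch("BeforeClusterCreate")

	// Confirm the patch is valid JSON and that the value is a string
	// containing the escaped response document.
	var doc map[string]map[string]string
	if err := json.Unmarshal(p, &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["data"]["BeforeClusterCreate-preloadedResponse"])
	// Prints: {"Status": "Success"}
}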

test/extension/handlers/lifecycle/handlers.go

Lines changed: 14 additions & 26 deletions
@@ -42,18 +42,15 @@ func (h *Handler) DoBeforeClusterCreate(ctx context.Context, request *runtimehoo
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("BeforeClusterCreate is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate); err != nil {
+
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	log.Info("BeforeClusterCreate has been recorded in configmap", "cm", cluster.Name+"-hookresponses")
-
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response)
-	if err != nil {
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -62,16 +59,14 @@ func (h *Handler) DoBeforeClusterUpgrade(ctx context.Context, request *runtimeho
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("BeforeClusterUpgrade is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade); err != nil {
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response)
-	if err != nil {
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -80,16 +75,14 @@ func (h *Handler) DoAfterControlPlaneInitialized(ctx context.Context, request *r
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("AfterControlPlaneInitialized is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized); err != nil {
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response)
-	if err != nil {
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -98,16 +91,14 @@ func (h *Handler) DoAfterControlPlaneUpgrade(ctx context.Context, request *runti
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("AfterControlPlaneUpgrade is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response)
-	if err != nil {
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -116,16 +107,14 @@ func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehoo
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("AfterClusterUpgrade is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade); err != nil {
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response)
-	if err != nil {
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -136,22 +125,21 @@ func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace
 	if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
 		return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
 	}
-	if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-response"]), response); err != nil {
+	if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response); err != nil {
 		return errors.Wrapf(err, "failed to read %q response information from ConfigMap", hook)
 	}
 	return nil
 }

-func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook) error {
+func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
 	hookName := runtimecatalog.HookName(hook)
 	configMap := &corev1.ConfigMap{}
 	configMapName := name + "-hookresponses"
 	if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
 		return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
 	}
-
 	patch := client.RawPatch(types.MergePatchType,
-		[]byte(fmt.Sprintf(`{"data":{"%s-called":"true"}}`, hookName)))
+		[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus":"%s"}}`, hookName, response.GetStatus()))) //nolint:gocritic
 	if err := h.Client.Patch(ctx, configMap, patch); err != nil {
 		return errors.Wrapf(err, "failed to update the ConfigMap %s/%s", namespace, configMapName)
 	}
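
The reordering above means the recorded status always reflects the response the extension actually returned, including a preloaded Failure. A condensed, self-contained sketch of the new read-then-record flow, with hypothetical stand-ins (readPreloadedResponse, recordActualStatus) replacing the ConfigMap accesses:

package main

import "fmt"

// response is a simplified stand-in for runtimehooksv1.ResponseObject.
type response struct {
	Status  string
	Message string
}

// readPreloadedResponse stands in for reading "<hook>-preloadedResponse" from the ConfigMap.
func readPreloadedResponse(hook string, r *response) error {
	r.Status, r.Message = "Failure", "hook failed"
	return nil
}

// recordActualStatus stands in for patching "<hook>-actualResponseStatus" into the ConfigMap.
func recordActualStatus(hook string, r *response) error {
	fmt.Printf("%s-actualResponseStatus: %s\n", hook, r.Status)
	return nil
}

func doBeforeClusterCreate(r *response) {
	// Read the preloaded response first...
	if err := readPreloadedResponse("BeforeClusterCreate", r); err != nil {
		r.Status, r.Message = "Failure", err.Error()
		return
	}
	// ...then record the status that is actually being returned.
	if err := recordActualStatus("BeforeClusterCreate", r); err != nil {
		r.Status, r.Message = "Failure", err.Error()
	}
}

func main() {
	r := &response{}
	doBeforeClusterCreate(r)
	fmt.Println(r.Status, r.Message)
}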

test/framework/cluster_topology_helpers.go

Lines changed: 20 additions & 0 deletions
@@ -69,6 +69,7 @@ type UpgradeClusterTopologyAndWaitForUpgradeInput struct {
 	WaitForKubeProxyUpgrade []interface{}
 	WaitForDNSUpgrade       []interface{}
 	WaitForEtcdUpgrade      []interface{}
+	RuntimeHookTestHandlers
 }

 // UpgradeClusterTopologyAndWaitForUpgrade upgrades a Cluster topology and waits for it to be upgraded.
@@ -102,6 +103,12 @@ func UpgradeClusterTopologyAndWaitForUpgrade(ctx context.Context, input UpgradeC
 		return patchHelper.Patch(ctx, input.Cluster)
 	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed())

+	// Once the Cluster has been patched we can run the BeforeClusterUpgrade test handler to ensure the
+	// lifecycle hook is executed and blocks correctly.
+	if input.RuntimeHookTestHandlers.BeforeClusterUpgrade != nil {
+		input.RuntimeHookTestHandlers.BeforeClusterUpgrade()
+	}
+
 	log.Logf("Waiting for control-plane machines to have the upgraded Kubernetes version")
 	WaitForControlPlaneMachinesToBeUpgraded(ctx, WaitForControlPlaneMachinesToBeUpgradedInput{
 		Lister: mgmtClient,
@@ -133,6 +140,12 @@ func UpgradeClusterTopologyAndWaitForUpgrade(ctx context.Context, input UpgradeC
 		Condition: EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)),
 	}, input.WaitForEtcdUpgrade...)

+	// Once the control plane has been upgraded we can run the AfterControlPlaneUpgrade test handler to ensure the
+	// lifecycle hook is executed and blocks correctly.
+	if input.RuntimeHookTestHandlers.AfterControlPlaneUpgrade != nil {
+		input.RuntimeHookTestHandlers.AfterControlPlaneUpgrade()
+	}
+
 	for _, deployment := range input.MachineDeployments {
 		if *deployment.Spec.Replicas > 0 {
 			log.Logf("Waiting for Kubernetes versions of machines in MachineDeployment %s/%s to be upgraded to %s",
@@ -147,3 +160,10 @@ func UpgradeClusterTopologyAndWaitForUpgrade(ctx context.Context, input UpgradeC
 		}
 	}
 }
+
+// RuntimeHookTestHandlers are functions that test the blocking behaviour of RuntimeHooks.
+type RuntimeHookTestHandlers struct {
+	BeforeClusterCreate      func()
+	BeforeClusterUpgrade     func()
+	AfterControlPlaneUpgrade func()
+}
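
A minimal runnable sketch of the design this introduces: an embedded struct of optional func fields, each invoked only when non-nil, so existing callers compile unchanged (upgradeInput and upgrade below are stand-ins for the real framework types):

package main

import "fmt"

type RuntimeHookTestHandlers struct {
	BeforeClusterCreate      func()
	BeforeClusterUpgrade     func()
	AfterControlPlaneUpgrade func()
}

type upgradeInput struct {
	RuntimeHookTestHandlers // embedded, so callers can omit it entirely
}

func upgrade(input upgradeInput) {
	// ...topology patched here...
	if input.BeforeClusterUpgrade != nil {
		input.BeforeClusterUpgrade()
	}
	// ...control plane upgraded here...
	if input.AfterControlPlaneUpgrade != nil {
		input.AfterControlPlaneUpgrade()
	}
	// ...MachineDeployments upgraded here...
}

func main() {
	// Only the hooks under test need to be provided.
	upgrade(upgradeInput{RuntimeHookTestHandlers: RuntimeHookTestHandlers{
		BeforeClusterUpgrade: func() { fmt.Println("blocking on BeforeClusterUpgrade") },
	}})
}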

test/framework/clusterctl/clusterctl_helpers.go

Lines changed: 7 additions & 1 deletion
@@ -185,9 +185,10 @@ type ApplyClusterTemplateAndWaitInput struct {
 	WaitForMachinePools []interface{}
 	Args                []string // extra args to be used during `kubectl apply`
 	ControlPlaneWaiters
+	framework.RuntimeHookTestHandlers
 }

-// Waiter is a function that runs and waits for a long running operation to finish and updates the result.
+// Waiter is a function that runs and waits for a long-running operation to finish and updates the result.
 type Waiter func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult)

 // ControlPlaneWaiters are Waiter functions for the control plane.
@@ -272,6 +273,11 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate
 	log.Logf("Applying the cluster template yaml to the cluster")
 	Expect(input.ClusterProxy.Apply(ctx, workloadClusterTemplate, input.Args...)).To(Succeed())

+	if input.RuntimeHookTestHandlers.BeforeClusterCreate != nil {
+		log.Logf("Blocking with BeforeClusterCreate hook")
+		input.RuntimeHookTestHandlers.BeforeClusterCreate()
+	}
+
 	log.Logf("Waiting for the cluster infrastructure to be provisioned")
 	result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
 		Getter: input.ClusterProxy.GetClient(),
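
All of the blocking checks follow the same Consistently-then-Eventually shape. A standalone sketch of that shape using Gomega outside a Ginkgo suite, with an atomic value standing in for the Cluster's Status.Phase (assumes the github.com/onsi/gomega module is available):

package main

import (
	"fmt"
	"sync/atomic"
	"time"

	. "github.com/onsi/gomega"
)

func main() {
	g := NewGomega(func(message string, _ ...int) { panic(message) })

	var phase atomic.Value
	phase.Store("Provisioning")

	// While the hook responds with Failure, the Cluster must not become Provisioned.
	g.Consistently(func() string { return phase.Load().(string) }, "1s", "100ms").
		ShouldNot(Equal("Provisioned"), "cluster provisioned before unblocking")

	// Flipping the phase here corresponds to the ConfigMap patch in the real test.
	go func() { time.Sleep(300 * time.Millisecond); phase.Store("Provisioned") }()

	// After unblocking, the condition must become true before a timeout.
	g.Eventually(func() string { return phase.Load().(string) }, "5s", "100ms").
		Should(Equal("Provisioned"), "cluster did not provision after unblocking")

	fmt.Println("BeforeClusterCreate block/unblock behaviour verified")
}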
