
Commit e6a5c4d

Add blocking end-to-end tests for lifecycle hooks
Signed-off-by: killianmuldoon <[email protected]>
1 parent 23cd3fe commit e6a5c4d

4 files changed: +176, -52 lines changed

test/e2e/cluster_upgrade_runtimesdk.go

Lines changed: 119 additions & 15 deletions
@@ -29,13 +29,17 @@ import (
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/pointer"
 	"sigs.k8s.io/controller-runtime/pkg/client"

+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/conditions"
 )

 // clusterUpgradeWithRuntimeSDKSpecInput is the input for clusterUpgradeWithRuntimeSDKSpec.
@@ -113,7 +117,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 		workerMachineCount = *input.WorkerMachineCount
 	}

-	// Setup a Namespace where to host objects for this spec and create a watcher for the Namespace events.
+	// Set up a Namespace to host objects for this spec and create a watcher for the Namespace events.
 	namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
 	clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -156,6 +160,9 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 			ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
 			WorkerMachineCount:       pointer.Int64Ptr(workerMachineCount),
 		},
+		PreWaitForCluster: func() {
+			beforeClusterCreateTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetIntervals(specName, "wait-cluster"))
+		},
 		WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
 		WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
 		WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
@@ -176,6 +183,12 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 		WaitForKubeProxyUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
 		WaitForDNSUpgrade:       input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
 		WaitForEtcdUpgrade:      input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
+		PreWaitForControlPlaneToBeUpgraded: func() {
+			beforeClusterUpgradeTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
+		},
+		PreWaitForMachineDeploymentToBeUpgraded: func() {
+			afterControlPlaneUpgradeTestHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))
+		},
 	})

 	// Only attempt to upgrade MachinePools if they were provided in the template.
@@ -201,13 +214,13 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 	})

 	By("Checking all lifecycle hooks have been called")
-	// Assert that each hook passed to this function is marked as "true" in the response configmap
+	// Assert that each hook has been called and returned "Success" during the test.
 	err = checkLifecycleHooks(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
-		"BeforeClusterCreate":          "",
-		"BeforeClusterUpgrade":         "",
-		"AfterControlPlaneInitialized": "",
-		"AfterControlPlaneUpgrade":     "",
-		"AfterClusterUpgrade":          "",
+		"BeforeClusterCreate":          "Success",
+		"BeforeClusterUpgrade":         "Success",
+		"AfterControlPlaneInitialized": "Success",
+		"AfterControlPlaneUpgrade":     "Success",
+		"AfterClusterUpgrade":          "Success",
 	})
 	Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")

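Note: for orientation, the sketch below shows the ConfigMap data layout these assertions rely on. It is illustrative only; the key names come from responsesConfigMap and recordCallInConfigMap in this commit, while the surrounding program scaffolding does not.

	package main

	import "fmt"

	func main() {
		// End state for one hook after a passing run: the test patches the preloaded
		// response from Failure to Success, and the extension handler records the
		// status it actually returned under the "-actualResponseStatus" key.
		data := map[string]string{
			"BeforeClusterCreate-preloadedResponse":    `{"Status": "Success"}`,
			"BeforeClusterCreate-actualResponseStatus": "Success",
		}
		// The assertion above expects "Success" here for all five hooks.
		fmt.Println(data["BeforeClusterCreate-actualResponseStatus"] == "Success") // true
	}
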
@@ -266,26 +279,117 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
 			Name:      fmt.Sprintf("%s-hookresponses", name),
 			Namespace: namespace.Name,
 		},
-		// Every response contain only Status:Success. The test checks whether each handler has been called at least once.
+		// Set the initial preloadedResponses for each of the tested hooks.
 		Data: map[string]string{
-			"BeforeClusterCreate-response":          `{"Status": "Success"}`,
-			"BeforeClusterUpgrade-response":         `{"Status": "Success"}`,
-			"AfterControlPlaneInitialized-response": `{"Status": "Success"}`,
-			"AfterControlPlaneUpgrade-response":     `{"Status": "Success"}`,
-			"AfterClusterUpgrade-response":          `{"Status": "Success"}`,
+			// Blocking hooks are set to Status:Failure initially. These will be changed during the test.
+			"BeforeClusterCreate-preloadedResponse":      `{"Status": "Failure", "Message": "hook failed"}`,
+			"BeforeClusterUpgrade-preloadedResponse":     `{"Status": "Failure", "Message": "hook failed"}`,
+			"AfterControlPlaneUpgrade-preloadedResponse": `{"Status": "Failure", "Message": "hook failed"}`,
+
+			// Non-blocking hooks are set to Status:Success.
+			"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
+			"AfterClusterUpgrade-preloadedResponse":          `{"Status": "Success"}`,
 		},
 	}
 }

+// checkLifecycleHooks checks that each hook in hooks has been called at least once, i.e. that its actualResponseStatus is present in the hook response configmap.
+// For hooks whose map value is non-empty, it also checks that the recorded status matches that value.
 func checkLifecycleHooks(ctx context.Context, c client.Client, namespace string, clusterName string, hooks map[string]string) error {
 	configMap := &corev1.ConfigMap{}
 	configMapName := clusterName + "-hookresponses"
 	err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap)
 	Expect(err).ToNot(HaveOccurred(), "Failed to get the hook response configmap")
-	for hook := range hooks {
-		if _, ok := configMap.Data[hook+"-called"]; !ok {
+	for hook, expected := range hooks {
+		v, ok := configMap.Data[hook+"-actualResponseStatus"]
+		if !ok {
 			return errors.Errorf("hook %s call not recorded in configMap %s/%s", hook, namespace, configMapName)
 		}
+		if expected != "" && expected != v {
+			return errors.Errorf("hook %s was expected to return %s, got %s in configMap %s/%s", hook, expected, v, namespace, configMapName)
+		}
 	}
 	return nil
 }
+
+// beforeClusterCreateTestHandler calls runtimeHookTestHandler with a condition that returns true once the Cluster has entered ClusterPhaseProvisioned.
+func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
+	runtimeHookTestHandler(ctx, c, namespace, clusterName, "BeforeClusterCreate", func() bool {
+		// This hook should block the Cluster from entering the "Provisioned" state.
+		cluster := &clusterv1.Cluster{}
+		Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
+		return cluster.Status.Phase == string(clusterv1.ClusterPhaseProvisioned)
+	}, intervals)
+}
+
+// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a condition that returns true once the Cluster's ReadyCondition
+// reports controlplanev1.RollingUpdateInProgressReason.
+func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
+	runtimeHookTestHandler(ctx, c, namespace, clusterName, "BeforeClusterUpgrade", func() bool {
+		cluster := &clusterv1.Cluster{}
+		var unblocked bool
+
+		// First ensure the Cluster topology has been updated to the target Kubernetes Version.
+		Eventually(func() bool {
+			Expect(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)).To(Succeed())
+			return cluster.Spec.Topology.Version == version
+		}).Should(BeTrue(), "BeforeClusterUpgrade blocking condition false: Cluster topology has not been updated to the target Kubernetes Version")
+
+		// Check if the Cluster is showing the RollingUpdateInProgress condition reason. If it is, the upgrade process is unblocked.
+		if conditions.IsFalse(cluster, clusterv1.ReadyCondition) &&
+			conditions.GetReason(cluster, clusterv1.ReadyCondition) == controlplanev1.RollingUpdateInProgressReason {
+			unblocked = true
+		}
+		return unblocked
+	}, intervals)
+}
+
+// afterControlPlaneUpgradeTestHandler calls runtimeHookTestHandler with a condition that returns true once any MachineDeployment in the Cluster
+// has been upgraded to the target Kubernetes version.
+func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
+	runtimeHookTestHandler(ctx, c, namespace, clusterName, "AfterControlPlaneUpgrade", func() bool {
+		var unblocked bool
+		mds := &clusterv1.MachineDeploymentList{}
+		Expect(c.List(ctx, mds, client.MatchingLabels{
+			clusterv1.ClusterLabelName:          clusterName,
+			clusterv1.ClusterTopologyOwnedLabel: "",
+		})).To(Succeed())
+
+		// If any of the MachineDeployments have the target Kubernetes Version, the hook is unblocked.
+		for _, md := range mds.Items {
+			if *md.Spec.Template.Spec.Version == version {
+				unblocked = true
+			}
+		}
+		return unblocked
+	}, intervals)
+}
+
+// runtimeHookTestHandler checks that the given hook is called and blocks, then patches its response to "Success" and checks that the operation it was blocking completes.
+func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, condition func() bool, intervals []interface{}) {
+	// First check that the LifecycleHook has been called at least once.
+	Eventually(func() bool {
+		err := checkLifecycleHooks(ctx, c, namespace, clusterName, map[string]string{hookName: ""})
+		return err == nil
+	}, intervals...).Should(BeTrue(), "%s has not been called", hookName)
+
+	// condition should consistently be false as the Runtime hook is returning "Failure".
+	Consistently(func() bool {
+		return condition()
+	}, intervals...).Should(BeFalse(), fmt.Sprintf("%s hook blocking condition succeeded before unblocking", hookName))
+
+	// Patch the ConfigMap to set the hook response to "Success".
+	By(fmt.Sprintf("Setting %s response to Status:Success", hookName))
+
+	configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: clusterName + "-hookresponses", Namespace: namespace}}
+	Expect(c.Get(ctx, util.ObjectKey(configMap), configMap)).To(Succeed())
+	patch := client.RawPatch(types.MergePatchType,
+		[]byte(fmt.Sprintf(`{"data":{"%s-preloadedResponse":%s}}`, hookName, "\"{\\\"Status\\\": \\\"Success\\\"}\"")))
+	err := c.Patch(ctx, configMap, patch)
+	Expect(err).ToNot(HaveOccurred())
+
+	// Expect the hook to unblock: the condition should become true before the timeout ends.
+	Eventually(func() bool {
+		return condition()
+	}, intervals...).Should(BeTrue(),
+		fmt.Sprintf("%s hook blocking condition did not succeed after unblocking", hookName))
+}
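
The doubly escaped payload built in runtimeHookTestHandler is easier to follow when the inner document is encoded programmatically. A minimal standalone sketch (not part of the commit) that produces the same merge patch:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		// The response under "<hook>-preloadedResponse" is itself a JSON document
		// stored as a string value, hence the double encoding in the test above.
		response := `{"Status": "Success"}`
		encoded, err := json.Marshal(response) // yields "{\"Status\": \"Success\"}"
		if err != nil {
			panic(err)
		}
		fmt.Printf(`{"data":{"%s-preloadedResponse":%s}}`+"\n", "BeforeClusterCreate", encoded)
		// {"data":{"BeforeClusterCreate-preloadedResponse":"{\"Status\": \"Success\"}"}}
	}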

test/extension/handlers/lifecycle/handlers.go

Lines changed: 23 additions & 25 deletions
@@ -42,18 +42,15 @@ func (h *Handler) DoBeforeClusterCreate(ctx context.Context, request *runtimehoo
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("BeforeClusterCreate is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate); err != nil {
+
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	log.Info("BeforeClusterCreate has been recorded in configmap", "cm", cluster.Name+"-hookresponses")
-
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response)
-	if err != nil {
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterCreate, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -62,16 +59,16 @@ func (h *Handler) DoBeforeClusterUpgrade(ctx context.Context, request *runtimeho
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("BeforeClusterUpgrade is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade); err != nil {
+
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response)
-	if err != nil {
+
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -80,16 +77,16 @@ func (h *Handler) DoAfterControlPlaneInitialized(ctx context.Context, request *r
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("AfterControlPlaneInitialized is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized); err != nil {
+
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response)
-	if err != nil {
+
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneInitialized, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -98,16 +95,16 @@ func (h *Handler) DoAfterControlPlaneUpgrade(ctx context.Context, request *runti
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("AfterControlPlaneUpgrade is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
+
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response)
-	if err != nil {
+
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterControlPlaneUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -116,16 +113,16 @@ func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehoo
 	log := ctrl.LoggerFrom(ctx)
 	log.Info("AfterClusterUpgrade is called")
 	cluster := request.Cluster
-	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade); err != nil {
+
+	if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
 		return
 	}
-	err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response)
-	if err != nil {
+
+	if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.AfterClusterUpgrade, response); err != nil {
 		response.Status = runtimehooksv1.ResponseStatusFailure
 		response.Message = err.Error()
-		return
 	}
 }

@@ -136,22 +133,23 @@ func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace
 	if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
 		return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
 	}
-	if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-response"]), response); err != nil {
+	if err := yaml.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response); err != nil {
 		return errors.Wrapf(err, "failed to read %q response information from ConfigMap", hook)
 	}
 	return nil
 }

-func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook) error {
+func (h *Handler) recordCallInConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
 	hookName := runtimecatalog.HookName(hook)
 	configMap := &corev1.ConfigMap{}
 	configMapName := name + "-hookresponses"
 	if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
 		return errors.Wrapf(err, "failed to read the ConfigMap %s/%s", namespace, configMapName)
 	}

+	// Patch the actualResponseStatus with the returned value.
 	patch := client.RawPatch(types.MergePatchType,
-		[]byte(fmt.Sprintf(`{"data":{"%s-called":"true"}}`, hookName)))
+		[]byte(fmt.Sprintf(`{"data":{"%s-actualResponseStatus":"%s"}}`, hookName, response.GetStatus()))) //nolint:gocritic
 	if err := h.Client.Patch(ctx, configMap, patch); err != nil {
 		return errors.Wrapf(err, "failed to update the ConfigMap %s/%s", namespace, configMapName)
 	}

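The five Do* handlers above now share an identical read-then-record flow. A possible consolidation is sketched below; doHook is a hypothetical helper, not part of this commit, and it assumes the runtimehooksv1.ResponseObject interface exposes SetStatus and SetMessage setters (the commit itself only relies on GetStatus):

	// doHook is a hypothetical helper showing the shared handler flow.
	func (h *Handler) doHook(ctx context.Context, cluster clusterv1.Cluster, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) {
		// First load the preloaded response for this hook from the ConfigMap.
		if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, hook, response); err != nil {
			response.SetStatus(runtimehooksv1.ResponseStatusFailure)
			response.SetMessage(err.Error())
			return
		}
		// Then record the status actually returned, so the e2e test can assert on it.
		if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, hook, response); err != nil {
			response.SetStatus(runtimehooksv1.ResponseStatusFailure)
			response.SetMessage(err.Error())
		}
	}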