Commit 5e1d960

Author: Yuvaraj Kakaraparthi (committed)

Implement BeforeClusterDelete hook

1 parent 001c327, commit 5e1d960

File tree: 8 files changed (+366, -3 lines changed)

exp/addons/internal/controllers/clusterresourcesetbinding_controller.go (8 additions, 0 deletions)

@@ -30,6 +30,8 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
+	"sigs.k8s.io/cluster-api/feature"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -91,6 +93,12 @@ func (r *ClusterResourceSetBindingReconciler) Reconcile(ctx context.Context, req
 	}
 	// If the owner cluster is in deletion process, delete its ClusterResourceSetBinding
 	if !cluster.DeletionTimestamp.IsZero() {
+		if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) {
+			if cluster.Spec.Topology != nil && !hooks.IsOkToDelete(cluster) {
+				// If the Cluster is not yet ready to be deleted then do not delete the ClusterResourceSetBinding.
+				return ctrl.Result{}, nil
+			}
+		}
 		log.Info("deleting ClusterResourceSetBinding because the owner Cluster is currently being deleted")
 		return ctrl.Result{}, r.Client.Delete(ctx, binding)
 	}

exp/runtime/api/v1alpha1/extensionconfig_types.go (4 additions, 0 deletions)

@@ -210,4 +210,8 @@ const (
 	// The annotation will be used to track the intent to call a hook as soon as an operation completes;
 	// the intent will be removed as soon as the hook call completes successfully.
 	PendingHooksAnnotation string = "runtime.cluster.x-k8s.io/pending-hooks"
+
+	// OkToDeleteAnnotation is the annotation used to indicate if a cluster is ready to be fully deleted.
+	// This annotation is added to the cluster after the BeforeClusterDelete hook has passed.
+	OkToDeleteAnnotation string = "runtime.cluster.x-k8s.io/ok-to-delete"
 )
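The commit also adds helper functions in sigs.k8s.io/cluster-api/internal/hooks (among the eight changed files, but not shown in this excerpt). A minimal sketch of what those helpers could look like, inferred only from the call sites hooks.IsOkToDelete(cluster) and hooks.MarkAsOkToDelete(ctx, r.Client, cluster) in this diff; the actual implementation in the commit may differ:

package hooks

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
)

// IsOkToDelete returns true if the object carries the ok-to-delete annotation,
// i.e. the BeforeClusterDelete hook has already returned a non-blocking response.
func IsOkToDelete(obj client.Object) bool {
	_, ok := obj.GetAnnotations()[runtimev1.OkToDeleteAnnotation]
	return ok
}

// MarkAsOkToDelete sets the ok-to-delete annotation on the object and patches it
// into the API server so that other controllers can observe it.
func MarkAsOkToDelete(ctx context.Context, c client.Client, obj client.Object) error {
	orig := obj.DeepCopyObject().(client.Object)
	annotations := obj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[runtimev1.OkToDeleteAnnotation] = ""
	obj.SetAnnotations(annotations)
	return c.Patch(ctx, obj, client.MergeFrom(orig))
}

With helpers shaped like this, the ClusterResourceSetBinding controller above and the core Cluster controller below can gate deletion on the annotation, while only the topology controller (further below) sets it.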

internal/controllers/cluster/cluster_controller.go (9 additions, 0 deletions)

@@ -41,6 +41,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/feature"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -215,6 +216,14 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster)
 func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 
+	// If the RuntimeSDK and ClusterTopology feature gates are enabled, for clusters with managed topologies
+	// only proceed with delete if the cluster is marked as `ok-to-delete`.
+	if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) {
+		if cluster.Spec.Topology != nil && !hooks.IsOkToDelete(cluster) {
+			return ctrl.Result{}, nil
+		}
+	}
+
 	descendants, err := r.listDescendants(ctx, cluster)
 	if err != nil {
 		log.Error(err, "Failed to list descendants")

internal/controllers/cluster/cluster_controller_test.go (51 additions, 0 deletions)

@@ -21,15 +21,19 @@ import (
 
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/feature"
+	"sigs.k8s.io/cluster-api/internal/test/builder"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -350,6 +354,53 @@ func TestClusterReconciler(t *testing.T) {
 	})
 }
 
+func TestClusterReconciler_reconcileDelete(t *testing.T) {
+	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
+	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
+
+	fakeInfraCluster := builder.InfrastructureCluster("test-ns", "test-cluster").Build()
+
+	tests := []struct {
+		name       string
+		cluster    *clusterv1.Cluster
+		wantDelete bool
+	}{
+		{
+			name: "should proceed with delete if the cluster has the ok-to-delete annotation",
+			cluster: func() *clusterv1.Cluster {
+				fakeCluster := builder.Cluster("test-ns", "test-cluster").WithTopology(&clusterv1.Topology{}).WithInfrastructureCluster(fakeInfraCluster).Build()
+				if fakeCluster.Annotations == nil {
+					fakeCluster.Annotations = map[string]string{}
+				}
+				fakeCluster.Annotations[runtimev1.OkToDeleteAnnotation] = ""
+				return fakeCluster
+			}(),
+			wantDelete: true,
+		},
+		{
+			name:       "should not proceed with delete if the cluster does not have the ok-to-delete annotation",
+			cluster:    builder.Cluster("test-ns", "test-cluster").WithTopology(&clusterv1.Topology{}).WithInfrastructureCluster(fakeInfraCluster).Build(),
+			wantDelete: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+			fakeClient := fake.NewClientBuilder().WithObjects(fakeInfraCluster, tt.cluster).Build()
+			r := &Reconciler{
+				Client:    fakeClient,
+				APIReader: fakeClient,
+			}
+
+			_, _ = r.reconcileDelete(ctx, tt.cluster)
+			infraCluster := builder.InfrastructureCluster("", "").Build()
+			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(fakeInfraCluster), infraCluster)
+			g.Expect(apierrors.IsNotFound(err)).To(Equal(tt.wantDelete))
+		})
+	}
+}
+
 func TestClusterReconcilerNodeRef(t *testing.T) {
 	t.Run("machine to cluster", func(t *testing.T) {
 		cluster := &clusterv1.Cluster{

internal/controllers/topology/cluster/cluster_controller.go (27 additions, 3 deletions)

@@ -42,6 +42,7 @@ import (
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
@@ -162,9 +163,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 	// In case the object is deleted, the managed topology stops to reconcile;
 	// (the other controllers will take care of deletion).
 	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
-		// TODO: When external patching is supported, we should handle the deletion
-		// of those external CRDs we created.
-		return ctrl.Result{}, nil
+		return r.reconcileDelete(ctx, cluster)
 	}
 
 	patchHelper, err := patch.NewHelper(cluster, r.Client)
@@ -336,6 +335,31 @@ func (r *Reconciler) machineDeploymentToCluster(o client.Object) []ctrl.Request
 	}}
 }
 
+func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
+	// Call the BeforeClusterDelete hook if the 'ok-to-delete' annotation is not set
+	// and add the annotation to the cluster after receiving a successful non-blocking response.
+	if feature.Gates.Enabled(feature.RuntimeSDK) {
+		if !hooks.IsOkToDelete(cluster) {
+			hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{
+				Cluster: *cluster,
+			}
+			hookResponse := &runtimehooksv1.BeforeClusterDeleteResponse{}
+			if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterDelete, cluster, hookRequest, hookResponse); err != nil {
+				return ctrl.Result{}, err
+			}
+			if hookResponse.RetryAfterSeconds != 0 {
+				return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
+			}
+			// The BeforeClusterDelete hook returned a non-blocking response. Now the cluster is ready to be deleted,
+			// so let's mark it as `ok-to-delete`.
+			if err := hooks.MarkAsOkToDelete(ctx, r.Client, cluster); err != nil {
+				return ctrl.Result{}, errors.Wrapf(err, "failed to mark %s/%s cluster as ok to delete", cluster.Namespace, cluster.Name)
+			}
+		}
+	}
+	return ctrl.Result{}, nil
+}
+
 // serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
 func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
 	return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
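On the extension side, a Runtime Extension registered for BeforeClusterDelete receives the request/response pair constructed above and can block deletion by returning a non-zero RetryAfterSeconds. A minimal handler sketch, assuming the plain request/response handler shape used for runtime hooks; the function name and the cleanupFinished check are illustrative placeholders, not part of this commit:

package main

import (
	"context"

	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
)

// DoBeforeClusterDelete blocks cluster deletion until an (illustrative) external
// cleanup has finished, then lets the topology controller mark the cluster ok-to-delete.
func DoBeforeClusterDelete(ctx context.Context, request *runtimehooksv1.BeforeClusterDeleteRequest, response *runtimehooksv1.BeforeClusterDeleteResponse) {
	response.Status = runtimehooksv1.ResponseStatusSuccess
	if !cleanupFinished(request.Cluster.Name) {
		// A non-zero RetryAfterSeconds is treated as a blocking response:
		// reconcileDelete requeues instead of calling hooks.MarkAsOkToDelete.
		response.RetryAfterSeconds = 30
	}
}

// cleanupFinished stands in for whatever external check the extension performs.
func cleanupFinished(clusterName string) bool {
	return true // placeholder
}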

internal/controllers/topology/cluster/cluster_controller_test.go (155 additions, 0 deletions)

@@ -23,19 +23,23 @@ import (
 
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilfeature "k8s.io/component-base/featuregate/testing"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -389,6 +393,157 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {
 	}, timeout).Should(Succeed())
 }
 
+func TestClusterReconciler_reconcileDelete(t *testing.T) {
+	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
+
+	catalog := runtimecatalog.New()
+	_ = runtimehooksv1.AddToCatalog(catalog)
+
+	beforeClusterDeleteGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterDelete)
+	if err != nil {
+		panic(err)
+	}
+
+	blockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
+		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
+			RetryAfterSeconds: int32(10),
+			CommonResponse: runtimehooksv1.CommonResponse{
+				Status: runtimehooksv1.ResponseStatusSuccess,
+			},
+		},
+	}
+	nonBlockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
+		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
+			RetryAfterSeconds: int32(0),
+			CommonResponse: runtimehooksv1.CommonResponse{
+				Status: runtimehooksv1.ResponseStatusSuccess,
+			},
+		},
+	}
+	failureResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
+		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
+			CommonResponse: runtimehooksv1.CommonResponse{
+				Status: runtimehooksv1.ResponseStatusFailure,
+			},
+		},
+	}
+
+	tests := []struct {
+		name               string
+		cluster            *clusterv1.Cluster
+		hookResponse       *runtimehooksv1.BeforeClusterDeleteResponse
+		wantHookToBeCalled bool
+		wantResult         ctrl.Result
+		wantOkToDelete     bool
+		wantErr            bool
+	}{
+		{
+			name: "should apply the ok-to-delete annotation if the BeforeClusterDelete hook returns a non-blocking response",
+			cluster: &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+				},
+				Spec: clusterv1.ClusterSpec{
+					Topology: &clusterv1.Topology{},
+				},
+			},
+			hookResponse:       nonBlockingResponse,
+			wantResult:         ctrl.Result{},
+			wantHookToBeCalled: true,
+			wantOkToDelete:     true,
+			wantErr:            false,
+		},
+		{
+			name: "should requeue if the BeforeClusterDelete hook returns a blocking response",
+			cluster: &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+				},
+				Spec: clusterv1.ClusterSpec{
+					Topology: &clusterv1.Topology{},
+				},
+			},
+			hookResponse:       blockingResponse,
+			wantResult:         ctrl.Result{RequeueAfter: time.Duration(10) * time.Second},
+			wantHookToBeCalled: true,
+			wantOkToDelete:     false,
+			wantErr:            false,
+		},
+		{
+			name: "should fail if the BeforeClusterDelete hook returns a failure response",
+			cluster: &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+				},
+				Spec: clusterv1.ClusterSpec{
+					Topology: &clusterv1.Topology{},
+				},
+			},
+			hookResponse:       failureResponse,
+			wantResult:         ctrl.Result{},
+			wantHookToBeCalled: true,
+			wantOkToDelete:     false,
+			wantErr:            true,
+		},
+		{
+			name: "should succeed if the ok-to-delete annotation is already present",
+			cluster: &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+					Annotations: map[string]string{
+						// If the cluster is already marked ok-to-delete the hook should not be called during cluster delete.
+						runtimev1.OkToDeleteAnnotation: "",
+					},
+				},
+				Spec: clusterv1.ClusterSpec{
+					Topology: &clusterv1.Topology{},
+				},
+			},
+			// Using a blocking response here should not matter as the hook should never be called.
+			// Using a blocking response to enforce the point.
+			hookResponse:       blockingResponse,
+			wantResult:         ctrl.Result{},
+			wantHookToBeCalled: false,
+			wantOkToDelete:     true,
+			wantErr:            false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build()
+			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
+				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
+					beforeClusterDeleteGVH: tt.hookResponse,
+				}).
+				WithCatalog(catalog).
+				Build()
+
+			r := &Reconciler{
+				Client:        fakeClient,
+				APIReader:     fakeClient,
+				RuntimeClient: fakeRuntimeClient,
+			}
+
+			res, err := r.reconcileDelete(ctx, tt.cluster)
+			if tt.wantErr {
+				g.Expect(err).NotTo(BeNil())
+			} else {
+				g.Expect(err).To(BeNil())
+				g.Expect(res).To(Equal(tt.wantResult))
+				g.Expect(hooks.IsOkToDelete(tt.cluster)).To(Equal(tt.wantOkToDelete))
+				g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.BeforeClusterDelete) == 1).To(Equal(tt.wantHookToBeCalled))
+			}
+		})
+	}
+}
+
 // TestClusterReconciler_deleteClusterClass tests the correct deletion behaviour for a ClusterClass with references in existing Clusters.
 // In this case deletion of the ClusterClass should be blocked by the webhook.
 func TestClusterReconciler_deleteClusterClass(t *testing.T) {
