Commit ae2d194

Authored and committed by Yuvaraj Kakaraparthi

fix review comments

1 parent 3ecb5e8 · commit ae2d194

12 files changed: +139 −199 lines

internal/controllers/cluster/cluster_controller.go

Lines changed: 0 additions & 17 deletions
@@ -40,7 +40,6 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/feature"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/util"
@@ -219,22 +218,6 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster)
 func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 
-	// Call the BeforeClusterDelete hook to before proceeding further.
-	if feature.Gates.Enabled(feature.RuntimeSDK) {
-		hookRequest := &runtimehooksv1.BeforeClusterDeleteRequest{
-			Cluster: *cluster,
-		}
-		hookResponse := &runtimehooksv1.BeforeClusterDeleteResponse{}
-		if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterDelete, hookRequest, hookResponse); err != nil {
-			return ctrl.Result{}, errors.Wrap(err, "error calling BeforeClusterDelete hook")
-		}
-		if hookResponse.RetryAfterSeconds != 0 {
-			// Cannot proceed with deleting the cluster yet. Lets requeue to retry at a later time.
-			return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
-		}
-		// We can proceed with the delete operation.
-	}
-
 	descendants, err := r.listDescendants(ctx, cluster)
 	if err != nil {
 		log.Error(err, "Failed to list descendants")
internal/controllers/cluster/cluster_controller_test.go

Lines changed: 0 additions & 91 deletions
@@ -18,24 +18,18 @@ package cluster
 
 import (
 	"testing"
-	"time"
 
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 	"sigs.k8s.io/cluster-api/feature"
-	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
-	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
-	runtimeclienttest "sigs.k8s.io/cluster-api/internal/runtime/client/test"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -355,91 +349,6 @@ func TestClusterReconciler(t *testing.T) {
 		}, timeout).Should(BeTrue())
 	})
 }
-
-func TestReconcileDelete(t *testing.T) {
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
-	gvh, err := runtimeclienttest.DefaultTestCatalog.GroupVersionHook(runtimehooksv1.BeforeClusterDelete)
-	if err != nil {
-		panic(err)
-	}
-
-	blockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
-		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
-			RetryAfterSeconds: int32(10),
-		},
-	}
-	runtimeClientWithBlockingResponse := runtimeclienttest.NewFakeRuntimeClientBuilder().
-		WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
-			gvh: blockingResponse,
-		}).
-		Build()
-
-	nonBlockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{
-		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
-			RetryAfterSeconds: int32(0),
-		},
-	}
-	runtimeClientWithNonBlockingResponse := runtimeclienttest.NewFakeRuntimeClientBuilder().
-		WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
-			gvh: nonBlockingResponse,
-		}).
-		Build()
-
-	tests := []struct {
-		name          string
-		cluster       *clusterv1.Cluster
-		runtimeClient runtimeclient.Client
-		wantResult    ctrl.Result
-		wantErr       bool
-	}{
-		{
-			name: "should requeue if BeforeClusterDelete hook is blocking",
-			cluster: &clusterv1.Cluster{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "test-name",
-					Namespace: "test-ns",
-				},
-				Spec: clusterv1.ClusterSpec{},
-			},
-			runtimeClient: runtimeClientWithBlockingResponse,
-			wantResult:    ctrl.Result{RequeueAfter: time.Duration(10) * time.Second},
-			wantErr:       false,
-		},
-		{
-			name: "should perform delete if BeforeClusterDelete hook is non-blocking",
-			cluster: &clusterv1.Cluster{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      "test-name",
-					Namespace: "test-ns",
-				},
-				Spec: clusterv1.ClusterSpec{},
-			},
-			runtimeClient: runtimeClientWithNonBlockingResponse,
-			wantResult:    ctrl.Result{},
-			wantErr:       false,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			g := NewWithT(t)
-
-			r := &Reconciler{
-				RuntimeClient: tt.runtimeClient,
-				Client:        env,
-				APIReader:     env,
-			}
-			result, err := r.reconcileDelete(ctx, tt.cluster)
-			if tt.wantErr {
-				g.Expect(err).NotTo(BeNil())
-			} else {
-				g.Expect(err).To(BeNil())
-				g.Expect(result).To(Equal(tt.wantResult))
-			}
-		})
-	}
-}
-
 func TestClusterReconcilerNodeRef(t *testing.T) {
 	t.Run("machine to cluster", func(t *testing.T) {
 		cluster := &clusterv1.Cluster{

internal/controllers/topology/cluster/cluster_controller.go

Lines changed: 4 additions & 3 deletions
@@ -41,6 +41,7 @@ import (
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
+	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
 	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
@@ -234,7 +235,7 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope.Scope) (ctrl.Result
 		return ctrl.Result{}, errors.Wrap(err, "error reconciling the Cluster topology")
 	}
 
-	requeueAfter := s.HookResponseTracker.EffectiveRequeueAfter()
+	requeueAfter := s.HookResponseTracker.EffectiveRetryAfter()
 	if requeueAfter != 0 {
 		return ctrl.Result{RequeueAfter: requeueAfter}, nil
 	}
@@ -272,9 +273,9 @@ func (r *Reconciler) callBeforeClusterCreateHook(ctx context.Context, s *scope.S
 	}
 	hookResponse := &runtimehooksv1.BeforeClusterCreateResponse{}
 	if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterCreate, hookRequest, hookResponse); err != nil {
-		return ctrl.Result{}, errors.Wrap(err, "error calling the BeforeClusterCreate hook")
+		return ctrl.Result{}, errors.Wrapf(err, "error calling the %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterCreate))
 	}
-	s.HookResponseTracker.Add("BeforeClusterCreate", hookResponse)
+	s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterCreate, hookResponse)
 	if hookResponse.RetryAfterSeconds != 0 {
 		return ctrl.Result{RequeueAfter: time.Duration(hookResponse.RetryAfterSeconds) * time.Second}, nil
 	}
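Two related changes land in this file: hook responses are now recorded on the HookResponseTracker keyed by the hook function itself rather than a hard-coded string, and the aggregated delay is read back through EffectiveRetryAfter (previously EffectiveRequeueAfter). The tracker's implementation is outside this diff; as a rough, self-contained illustration of the aggregation it is expected to perform, the toy below keeps the shortest non-zero RetryAfterSeconds across all recorded responses (that aggregation rule, and every name below, is an assumption for illustration, not code from the repository):

package main

import (
	"fmt"
	"time"
)

// toyTracker is an illustration only; the real HookResponseTracker lives elsewhere in the repo.
type toyTracker struct {
	retrySeconds []int32
}

// Add records the RetryAfterSeconds reported by one lifecycle hook response.
func (t *toyTracker) Add(retryAfterSeconds int32) {
	t.retrySeconds = append(t.retrySeconds, retryAfterSeconds)
}

// EffectiveRetryAfter returns the shortest non-zero retry as a duration, or 0 when no hook blocks.
func (t *toyTracker) EffectiveRetryAfter() time.Duration {
	var min int32
	for _, s := range t.retrySeconds {
		if s != 0 && (min == 0 || s < min) {
			min = s
		}
	}
	return time.Duration(min) * time.Second
}

func main() {
	t := &toyTracker{}
	t.Add(0)  // non-blocking hook
	t.Add(30) // blocking hook, retry in 30s
	t.Add(10) // blocking hook, retry in 10s
	fmt.Println(t.EffectiveRetryAfter()) // 10s: requeue soon enough to satisfy every blocking hook
}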

internal/controllers/topology/cluster/cluster_controller_test.go

Lines changed: 12 additions & 3 deletions
@@ -36,7 +36,7 @@ import (
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
 	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
-	runtimeclienttest "sigs.k8s.io/cluster-api/internal/runtime/client/test"
+	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -443,18 +443,26 @@ func TestClusterReconciler_deleteClusterClass(t *testing.T) {
 }
 
 func TestReconciler_callBeforeClusterCreateHook(t *testing.T) {
-	gvh, err := runtimeclienttest.DefaultTestCatalog.GroupVersionHook(runtimehooksv1.BeforeClusterCreate)
+	catalog := runtimecatalog.New()
+	_ = runtimehooksv1.AddToCatalog(catalog)
+	gvh, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterCreate)
 	if err != nil {
 		panic(err)
 	}
 
 	blockingResponse := &runtimehooksv1.BeforeClusterCreateResponse{
 		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
+			CommonResponse: runtimehooksv1.CommonResponse{
+				Status: runtimehooksv1.ResponseStatusSuccess,
+			},
 			RetryAfterSeconds: int32(10),
 		},
 	}
 	nonBlockingResponse := &runtimehooksv1.BeforeClusterCreateResponse{
 		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
+			CommonResponse: runtimehooksv1.CommonResponse{
+				Status: runtimehooksv1.ResponseStatusSuccess,
+			},
 			RetryAfterSeconds: int32(0),
 		},
 	}
@@ -496,7 +504,8 @@ func TestReconciler_callBeforeClusterCreateHook(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			g := NewWithT(t)
 
-			runtimeClient := runtimeclienttest.NewFakeRuntimeClientBuilder().
+			runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
+				WithCatalog(catalog).
 				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
 					gvh: tt.hookResponse,
 				}).
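The test now builds its own catalog, registers the runtime hooks on it, and hands it to the fake runtime client, which moves from internal/runtime/client/test to internal/runtime/client/fake with a shorter builder name. A condensed view of that setup, assembled only from identifiers visible in this hunk; the helper function wrapping it is illustrative and not part of the test file, and the omitted test table and assertions are unchanged:

// Sketch assembled from the hunk above; not a complete test file.
func newBlockingBeforeClusterCreateClient() runtimeclient.Client {
	catalog := runtimecatalog.New()
	_ = runtimehooksv1.AddToCatalog(catalog)
	gvh, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterCreate)
	if err != nil {
		panic(err)
	}

	// A "blocking" response: Status is Success, but a non-zero RetryAfterSeconds asks the caller to requeue.
	blocking := &runtimehooksv1.BeforeClusterCreateResponse{
		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
			CommonResponse: runtimehooksv1.CommonResponse{
				Status: runtimehooksv1.ResponseStatusSuccess,
			},
			RetryAfterSeconds: int32(10),
		},
	}

	return fakeruntimeclient.NewRuntimeClientBuilder().
		WithCatalog(catalog).
		WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
			gvh: blocking,
		}).
		Build()
}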

internal/controllers/topology/cluster/conditions.go

Lines changed: 2 additions & 2 deletions
@@ -59,14 +59,14 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
 
 	// If any of the lifecycle hooks are blocking any part of the reconciliation then topology
 	// is not considered as fully reconciled.
-	if s.HookResponseTracker.EffectiveRequeueAfter() != 0 {
+	if s.HookResponseTracker.EffectiveRetryAfter() != 0 {
 		conditions.Set(
 			cluster,
 			conditions.FalseCondition(
 				clusterv1.TopologyReconciledCondition,
 				clusterv1.TopologyReconciledHookBlockingReason,
 				clusterv1.ConditionSeverityInfo,
-				s.HookResponseTracker.MessageSummary(),
+				s.HookResponseTracker.EffectiveMessage(),
 			),
 		)
 		return nil

internal/controllers/topology/cluster/desired_state.go

Lines changed: 7 additions & 8 deletions
@@ -36,6 +36,7 @@ import (
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
 	tlog "sigs.k8s.io/cluster-api/internal/log"
+	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
 )
 
 // computeDesiredState computes the desired state of the cluster topology.
@@ -158,7 +159,7 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop
 
 // computeControlPlane computes the desired state for the ControlPlane object starting from the
 // corresponding template defined in the blueprint.
-func (r *Reconciler) computeControlPlane(_ context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 	template := s.Blueprint.ControlPlane.Template
 	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
 	cluster := s.Current.Cluster
@@ -222,7 +223,7 @@ func (r *Reconciler) computeControlPlane(_ context.Context, s *scope.Scope, infr
 	}
 
 	// Sets the desired Kubernetes version for the control plane.
-	version, err := r.computeControlPlaneVersion(s)
+	version, err := r.computeControlPlaneVersion(ctx, s)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to compute version of control plane")
 	}
@@ -236,7 +237,7 @@ func (r *Reconciler) computeControlPlane(_ context.Context, s *scope.Scope, infr
 // computeControlPlaneVersion calculates the version of the desired control plane.
 // The version is calculated using the state of the current machine deployments, the current control plane
 // and the version defined in the topology.
-func (r *Reconciler) computeControlPlaneVersion(s *scope.Scope) (string, error) {
+func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
 	desiredVersion := s.Blueprint.Topology.Version
 	// If we are creating the control plane object (current control plane is nil), use version from topology.
 	if s.Current.ControlPlane == nil || s.Current.ControlPlane.Object == nil {
@@ -325,12 +326,10 @@ func (r *Reconciler) computeControlPlaneVersion(s *scope.Scope) (string, error)
 			ToKubernetesVersion: desiredVersion,
 		}
 		hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{}
-		// TODO: consider adding ctx to the functions above the chain?
-		// Will also be useful to pickup a logger from the context to add some log messages.
-		if err := r.RuntimeClient.CallAllExtensions(context.Background(), runtimehooksv1.BeforeClusterUpgrade, hookRequest, hookResponse); err != nil {
-			return *currentVersion, errors.Wrap(err, "failed to call BeforeClusterUpgrade hook")
+		if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, hookRequest, hookResponse); err != nil {
+			return "", errors.Wrapf(err, "failed to call %s hook", runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade))
 		}
-		s.HookResponseTracker.Add("BeforeClusterUpgrade", hookResponse)
+		s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterUpgrade, hookResponse)
 		if hookResponse.RetryAfterSeconds != 0 {
 			// Cannot pickup the new version right now. Need to try again later.
 			return *currentVersion, nil
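The substance of this change is context plumbing: ctx now flows from computeControlPlane through computeControlPlaneVersion into CallAllExtensions, replacing the context.Background() call that the removed TODO complained about, and the error path now returns an empty version rather than the current one. A small stand-alone example of why threading the caller's context matters; every name below is made up for illustration and none of it is Cluster API code:

package main

import (
	"context"
	"fmt"
	"time"
)

// callHook stands in for a runtime-extension round trip: it must receive the caller's
// context so cancellation and deadlines propagate instead of being silently dropped.
func callHook(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated extension call
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// computeVersion mirrors the new shape of computeControlPlaneVersion: it takes ctx as its
// first argument and returns "" on error rather than a possibly stale current version.
func computeVersion(ctx context.Context, desired string) (string, error) {
	if err := callHook(ctx); err != nil {
		return "", fmt.Errorf("failed to call hook: %w", err)
	}
	return desired, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// With context.Background() hard-coded inside, this cancellation would never reach the hook call.
	if _, err := computeVersion(ctx, "v1.24.0"); err != nil {
		fmt.Println("hook call aborted:", err)
	}
}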
