
Commit c4dbfce

Implements paused condition for kubeadm kcp
1 parent e67a7ac commit c4dbfce

2 files changed (+139 / -24 lines)


controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 38 additions & 24 deletions
@@ -161,11 +161,6 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 	log = log.WithValues("Cluster", klog.KObj(cluster))
 	ctx = ctrl.LoggerInto(ctx, log)
 
-	if annotations.IsPaused(cluster, kcp) {
-		log.Info("Reconciliation is paused for this object")
-		return ctrl.Result{}, nil
-	}
-
 	// Initialize the patch helper.
 	patchHelper, err := patch.NewHelper(kcp, r.Client)
 	if err != nil {
@@ -181,27 +176,28 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 		// patch and return right away instead of reusing the main defer,
 		// because the main defer may take too much time to get cluster status
 		// Patch ObservedGeneration only if the reconciliation completed successfully
+
+		// TODO theobarberbany: Is this ordering correct, do we want finalizer to
+		// take priority over the paused condition?
 		patchOpts := []patch.Option{patch.WithStatusObservedGeneration{}}
 		if err := patchHelper.Patch(ctx, kcp, patchOpts...); err != nil {
 			return ctrl.Result{}, errors.Wrapf(err, "failed to add finalizer")
 		}
-
+		log.Info("Returning early to add finalizer")
 		return ctrl.Result{}, nil
 	}
 
 	// Initialize the control plane scope; this includes also checking for orphan machines and
 	// adopt them if necessary.
 	controlPlane, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
 	if err != nil {
+		log.Error(err, "Failed to initControlPlaneScope")
 		return ctrl.Result{}, err
 	}
-	if adoptableMachineFound {
-		// if there are no errors but at least one CP machine has been adopted, then requeue and
-		// wait for the update event for the ownership to be set.
-		return ctrl.Result{}, nil
-	}
+	log.Info("initControlPlaneScope")
 
 	defer func() {
+		log.Info("start of deferred update status")
 		// Always attempt to update status.
 		if err := r.updateStatus(ctx, controlPlane); err != nil {
 			var connFailure *internal.RemoteClusterConnectionError
@@ -222,6 +218,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 			log.Error(err, "Failed to patch KubeadmControlPlane")
 			reterr = kerrors.NewAggregate([]error{reterr, err})
 		}
+		log.Info("patched KubeadmControlPlane")
 
 		// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
 		if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -243,6 +240,21 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 		}
 	}()
 
+	if annotations.IsPaused(cluster, kcp) {
+		log.Info("Reconciliation is paused for this object")
+		conditions.MarkTrue(kcp, clusterv1.PausedCondition)
+		return ctrl.Result{}, nil
+	}
+	log.Info("Object not paused")
+	conditions.MarkFalse(kcp, clusterv1.PausedCondition, clusterv1.ResourceNotPausedReason, clusterv1.ConditionSeverityInfo, "Resource is operating as expected")
+
+	if adoptableMachineFound {
+		// if there are no errors but at least one CP machine has been adopted, then requeue and
+		// wait for the update event for the ownership to be set.
+		log.Info("Returning early, adoptableMachineFound")
+		return ctrl.Result{}, nil
+	}
+
 	if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
 		// Handle deletion reconciliation loop.
 		res, err = r.reconcileDelete(ctx, controlPlane)
@@ -325,19 +337,21 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc
 	)
 
 	// Patch the object, ignoring conflicts on the conditions owned by this controller.
-	// Also, if requested, we are adding additional options like e.g. Patch ObservedGeneration when issuing the
-	// patch at the end of the reconcile loop.
-	options = append(options, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
-		controlplanev1.MachinesCreatedCondition,
-		clusterv1.ReadyCondition,
-		controlplanev1.MachinesSpecUpToDateCondition,
-		controlplanev1.ResizedCondition,
-		controlplanev1.MachinesReadyCondition,
-		controlplanev1.AvailableCondition,
-		controlplanev1.CertificatesAvailableCondition,
-	}})
-
-	return patchHelper.Patch(ctx, kcp, options...)
+	return patchHelper.Patch(
+		ctx,
+		kcp,
+		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+			controlplanev1.MachinesCreatedCondition,
+			clusterv1.ReadyCondition,
+			clusterv1.PausedCondition,
+			controlplanev1.MachinesSpecUpToDateCondition,
+			controlplanev1.ResizedCondition,
+			controlplanev1.MachinesReadyCondition,
+			controlplanev1.AvailableCondition,
+			controlplanev1.CertificatesAvailableCondition,
+		}},
+		patch.WithStatusObservedGeneration{},
+	)
 }
 
 // reconcile handles KubeadmControlPlane reconciliation.
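
The net effect of the controller.go changes is that the paused check now runs only after the deferred patch has been registered, so the Paused condition marked inside Reconcile is written back even on the early return. A condensed, self-contained sketch of that ordering (illustrative only: the function name reconcilePausedCondition is made up here, and clusterv1.PausedCondition / clusterv1.ResourceNotPausedReason are assumed to come from the accompanying API change):

package sketch

import (
	"context"

	"github.com/pkg/errors"
	ctrl "sigs.k8s.io/controller-runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/annotations"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
)

// reconcilePausedCondition is an illustrative fragment, not the real controller.
// It shows why the paused check sits below the deferred patch: the condition
// marked here is still persisted when we return early.
func reconcilePausedCondition(ctx context.Context, patchHelper *patch.Helper, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (res ctrl.Result, reterr error) {
	defer func() {
		// Runs on every return path below, writing back status changes,
		// including the Paused condition marked in this function.
		if err := patchHelper.Patch(ctx, kcp, patch.WithStatusObservedGeneration{}); err != nil {
			reterr = errors.Wrap(err, "failed to patch KubeadmControlPlane")
		}
	}()

	if annotations.IsPaused(cluster, kcp) {
		// Early return: the deferred patch above still records Paused=True.
		conditions.MarkTrue(kcp, clusterv1.PausedCondition)
		return ctrl.Result{}, nil
	}
	conditions.MarkFalse(kcp, clusterv1.PausedCondition, clusterv1.ResourceNotPausedReason,
		clusterv1.ConditionSeverityInfo, "Resource is operating as expected")
	return ctrl.Result{}, nil
}

As the TODO in the diff notes, the finalizer early-return still happens before this paused check, so that ordering question remains open in this commit.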

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 101 additions & 0 deletions
@@ -64,6 +64,10 @@ import (
 	"sigs.k8s.io/cluster-api/util/secret"
 )
 
+const (
+	timeout = time.Second * 30
+)
+
 func TestClusterToKubeadmControlPlane(t *testing.T) {
 	g := NewWithT(t)
 	fakeClient := newFakeClient()
@@ -391,6 +395,15 @@ func TestReconcilePaused(t *testing.T) {
 		Client: fakeClient,
 		SecretCachingClient: fakeClient,
 		recorder: record.NewFakeRecorder(32),
+		managementCluster: &fakeManagementCluster{
+			Management: &internal.Management{Client: env},
+			Workload: fakeWorkloadCluster{
+				Workload: &internal.Workload{
+					Client: env,
+				},
+				Status: internal.ClusterStatus{},
+			},
+		},
 	}
 
 	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
@@ -2280,6 +2293,94 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
 	})
 }
 
+func TestReconcilePausedCondition(t *testing.T) {
+	g := NewWithT(t)
+
+	ns, err := env.CreateNamespace(ctx, "test-reconcile-pause-condition")
+	g.Expect(err).ToNot(HaveOccurred())
+
+	cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
+	g.Expect(env.Create(ctx, cluster)).To(Succeed())
+	g.Expect(env.Create(ctx, kcp)).To(Succeed())
+	defer func(do ...client.Object) {
+		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
+	}(cluster, kcp, ns)
+
+	// Set cluster.status.InfrastructureReady so we actually enter in the reconcile loop
+	patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"status\":{\"infrastructureReady\":%t}}", true)))
+	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
+
+	genericInfrastructureMachineTemplate := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"kind": "GenericInfrastructureMachineTemplate",
+			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+			"metadata": map[string]interface{}{
+				"name": "infra-foo",
+				"namespace": cluster.Namespace,
+			},
+			"spec": map[string]interface{}{
+				"template": map[string]interface{}{
+					"spec": map[string]interface{}{
+						"hello": "world",
+					},
+				},
+			},
+		},
+	}
+	g.Expect(env.Create(ctx, genericInfrastructureMachineTemplate)).To(Succeed())
+
+	r := &KubeadmControlPlaneReconciler{
+		Client: env,
+		SecretCachingClient: secretCachingClient,
+		recorder: record.NewFakeRecorder(32),
+		managementCluster: &fakeManagementCluster{
+			Management: &internal.Management{Client: env},
+			Workload: fakeWorkloadCluster{
+				Workload: &internal.Workload{
+					Client: env,
+				},
+				Status: internal.ClusterStatus{},
+			},
+		},
+	}
+
+	// We start unpaused
+	g.Eventually(func() bool {
+		_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+		g.Expect(err).ToNot(HaveOccurred())
+
+		key := util.ObjectKey(kcp)
+		if err := env.Get(ctx, key, kcp); err != nil {
+			return false
+		}
+
+		// Checks the condition is set
+		if !conditions.Has(kcp, clusterv1.PausedCondition) {
+			return false
+		}
+		// The condition is set to false
+		return conditions.IsFalse(kcp, clusterv1.PausedCondition)
+	}, timeout).Should(BeTrue())
+
+	// Pause the cluster
+	cluster.Spec.Paused = true
+	g.Expect(env.Update(ctx, cluster)).To(Succeed())
+
+	g.Eventually(func() bool {
+		_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+		g.Expect(err).ToNot(HaveOccurred())
+
+		key := util.ObjectKey(kcp)
+		if err := env.Get(ctx, key, kcp); err != nil {
+			return false
+		}
+
+		// The condition is set to true
+		return conditions.IsTrue(kcp, clusterv1.PausedCondition)
+	}, timeout).Should(BeTrue())
+
+}
+
 // test utils.
 
 func newFakeClient(initObjs ...client.Object) client.Client {
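
The test above drives Reconcile directly; a caller watching the object can rely on the same condition from the outside. A minimal client-side sketch, assuming the clusterv1.PausedCondition type introduced with this change (the helper name waitForKCPPaused and the polling intervals are illustrative, not part of the commit):

package sketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// waitForKCPPaused pauses the owning Cluster and polls the KubeadmControlPlane
// until its Paused condition reports True.
func waitForKCPPaused(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) error {
	cluster.Spec.Paused = true
	if err := c.Update(ctx, cluster); err != nil {
		return err
	}
	return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
		if err := c.Get(ctx, client.ObjectKeyFromObject(kcp), kcp); err != nil {
			return false, nil // tolerate transient read errors and retry
		}
		return conditions.IsTrue(kcp, clusterv1.PausedCondition), nil
	})
}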
