@@ -49,6 +49,7 @@ import (
 	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
@@ -184,6 +185,17 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
 			}
 		}
 
+		// Always update the readyCondition.
+		conditions.SetSummary(kcp,
+			conditions.WithConditions(
+				controlplanev1.MachinesSpecUpToDateCondition,
+				controlplanev1.ResizedCondition,
+				controlplanev1.MachinesReadyCondition,
+				controlplanev1.AvailableCondition,
+				controlplanev1.CertificatesAvailableCondition,
+			),
+		)
+
 		// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
 		if err := patchHelper.Patch(ctx, kcp); err != nil {
 			logger.Error(err, "Failed to patch KubeadmControlPlane")
@@ -229,8 +241,10 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	controllerRef := metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
 	if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil {
 		logger.Error(err, "unable to lookup or create cluster certificates")
+		conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
 		return ctrl.Result{}, err
 	}
+	conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition)
 
 	// If ControlPlaneEndpoint is not set, return early
 	if cluster.Spec.ControlPlaneEndpoint.IsZero() {
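The failure branch records why certificate lookup/generation failed, and the recorded fields can be read back with conditions.Get. An illustrative sketch under the same assumptions as above; the error here is a stand-in, not the controller's real one:

package main

import (
	"errors"
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	kcp := &controlplanev1.KubeadmControlPlane{}
	err := errors.New("ca cert missing") // stand-in for the real lookup/generate error

	// Mirror of the error path above: reason, severity, and error text are recorded.
	conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition,
		controlplanev1.CertificatesGenerationFailedReason,
		clusterv1.ConditionSeverityWarning, err.Error())

	if c := conditions.Get(kcp, controlplanev1.CertificatesAvailableCondition); c != nil {
		fmt.Printf("status=%s reason=%s severity=%s message=%q\n",
			c.Status, c.Reason, c.Severity, c.Message)
	}
}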
@@ -264,11 +278,27 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	controlPlane := internal.NewControlPlane(cluster, kcp, ownedMachines)
-	requireUpgrade := controlPlane.MachinesNeedingUpgrade()
-	// Upgrade takes precedence over other operations
-	if len(requireUpgrade) > 0 {
-		logger.Info("Upgrading Control Plane")
+
+	// Aggregate the operational state of all the machines; while aggregating we are adding the
+	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
+	conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef())
+
+	// Rollout of control plane machines due to configuration changes (e.g. upgrades) takes precedence over other operations.
+	needRollout := controlPlane.MachinesNeedingRollout()
+	switch {
+	case len(needRollout) > 0:
+		logger.Info("Rolling out Control Plane machines")
+		// NOTE: we are using Status.UpdatedReplicas from the previous reconciliation only to provide a meaningful message,
+		// and it does not influence any reconciliation logic.
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), kcp.Status.UpdatedReplicas)
 		return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane)
+	default:
+		// Make sure the last upgrade operation is marked as completed.
+		// NOTE: we are checking that the condition already exists in order to avoid setting this condition on the first
+		// reconciliation/before a rolling upgrade actually starts.
+		if conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) {
+			conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition)
+		}
 	}
 
 	// If we've made it this far, we can assume that all ownedMachines are up to date
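SetAggregate rolls each machine's Ready condition up into the KCP's MachinesReady condition, and AddSourceRef tags the aggregated reason with its source object (the reason@machine/name form mentioned in the comment above). A hedged sketch with a hand-built getter slice standing in for ownedMachines.ConditionGetters(), assuming Machine implements conditions.Setter in this release; the BootstrapFailed reason is hypothetical:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	kcp := &controlplanev1.KubeadmControlPlane{}

	// TypeMeta is set explicitly because these objects don't come from the API
	// server, and the source ref is built from the object's kind and name.
	m1 := &clusterv1.Machine{
		TypeMeta:   metav1.TypeMeta{Kind: "Machine", APIVersion: clusterv1.GroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{Name: "cp-0"},
	}
	m2 := &clusterv1.Machine{
		TypeMeta:   metav1.TypeMeta{Kind: "Machine", APIVersion: clusterv1.GroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{Name: "cp-1"},
	}
	conditions.MarkTrue(m1, clusterv1.ReadyCondition)
	conditions.MarkFalse(m2, clusterv1.ReadyCondition, "BootstrapFailed", // hypothetical reason
		clusterv1.ConditionSeverityWarning, "node not joined")

	// Stand-in for ownedMachines.ConditionGetters().
	getters := []conditions.Getter{m1, m2}

	// With AddSourceRef, the aggregated reason stays traceable to cp-1.
	conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, getters, conditions.AddSourceRef())

	if c := conditions.Get(kcp, controlplanev1.MachinesReadyCondition); c != nil {
		fmt.Println(c.Status, c.Reason)
	}
}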
@@ -280,6 +310,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	case numMachines < desiredReplicas && numMachines == 0:
 		// Create new Machine w/ init
 		logger.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "")
 		return r.initializeControlPlane(ctx, cluster, kcp, controlPlane)
 	// We are scaling up
 	case numMachines < desiredReplicas && numMachines > 0: