@@ -49,6 +49,7 @@ import (
 	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
@@ -172,6 +173,17 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
 			}
 		}
 
+		// Always update the readyCondition.
+		conditions.SetSummary(kcp,
+			conditions.WithConditions(
+				controlplanev1.MachinesSpecUpToDateCondition,
+				controlplanev1.ResizedCondition,
+				controlplanev1.MachinesReadyCondition,
+				controlplanev1.AvailableCondition,
+				controlplanev1.CertificatesAvailableCondition,
+			),
+		)
+
 		// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
 		if err := patchHelper.Patch(ctx, kcp); err != nil {
 			logger.Error(err, "Failed to patch KubeadmControlPlane")
@@ -220,8 +232,10 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	controllerRef := metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
 	if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil {
 		logger.Error(err, "unable to lookup or create cluster certificates")
+		conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
 		return ctrl.Result{}, err
 	}
+	conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition)
 
 	// If ControlPlaneEndpoint is not set, return early
 	if cluster.Spec.ControlPlaneEndpoint.IsZero() {
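
Reviewer note: the pattern here (MarkFalse with a reason and severity on the failure path, MarkTrue on success) keeps CertificatesAvailableCondition aligned with the latest lookup attempt. A rough sketch of reading that condition back, purely illustrative and not part of the change; describeCertificates is a hypothetical helper.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// describeCertificates summarizes the condition written by MarkFalse/MarkTrue above.
func describeCertificates(kcp *controlplanev1.KubeadmControlPlane) string {
	c := conditions.Get(kcp, controlplanev1.CertificatesAvailableCondition)
	if c == nil {
		return "CertificatesAvailable not set yet"
	}
	if c.Status == corev1.ConditionTrue {
		return "cluster certificates available"
	}
	// Reason, Severity, and Message come straight from the MarkFalse call.
	return fmt.Sprintf("certificates unavailable: %s (severity %s): %s", c.Reason, c.Severity, c.Message)
}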
@@ -255,11 +269,27 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	controlPlane := internal.NewControlPlane(cluster, kcp, ownedMachines)
-	requireUpgrade := controlPlane.MachinesNeedingUpgrade()
-	// Upgrade takes precedence over other operations
-	if len(requireUpgrade) > 0 {
-		logger.Info("Upgrading Control Plane")
+
+	// Aggregate the operational state of all the machines; while aggregating we are adding the
+	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
+	conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef())
+
+	// Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations.
+	needRollout := controlPlane.MachinesNeedingRollout()
+	switch {
+	case len(needRollout) > 0:
+		logger.Info("Rolling out Control Plane machines")
+		// NOTE: we are using Status.UpdatedReplicas from the previous reconciliation only to provide a meaningful message
+		// and this does not influence any reconciliation logic.
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), kcp.Status.UpdatedReplicas)
 		return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane)
+	default:
+		// make sure last upgrade operation is marked as completed.
+		// NOTE: we are checking the condition already exists in order to avoid to set this condition at the first
+		// reconciliation/before a rolling upgrade actually starts.
+		if conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) {
+			conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition)
+		}
 	}
 
 	// If we've made it this far, we can assume that all ownedMachines are up to date
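
Reviewer note: SetAggregate merges the Ready condition of every owned machine into the KCP's MachinesReadyCondition, and AddSourceRef suffixes each merged reason with the contributing object. A generic sketch of the same call follows, assuming a plain slice of Machines instead of the internal collection behind ownedMachines.ConditionGetters(), and assuming Machine implements the conditions Getter interface (it does once machines carry conditions); setMachinesReady is a hypothetical helper.

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// setMachinesReady aggregates per-machine Ready conditions into the control plane object.
func setMachinesReady(kcp *controlplanev1.KubeadmControlPlane, machines []clusterv1.Machine) {
	getters := make([]conditions.Getter, 0, len(machines))
	for i := range machines {
		getters = append(getters, &machines[i])
	}
	// AddSourceRef records which machine contributed each merged reason (reason@machine/name).
	conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, getters, conditions.AddSourceRef())
}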
@@ -271,6 +301,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	case numMachines < desiredReplicas && numMachines == 0:
 		// Create new Machine w/ init
 		logger.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "")
 		return r.initializeControlPlane(ctx, cluster, kcp, controlPlane)
 	// We are scaling up
 	case numMachines < desiredReplicas && numMachines > 0: