@@ -49,6 +49,7 @@ import (
 	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
@@ -161,6 +162,13 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
 			}
 		}
 
+		// Always update the readyCondition; the summary is represented using the "1 of x completed" notation.
+		conditions.SetSummary(kcp, conditions.WithConditionOrder(
+			controlplanev1.MachinesReadyCondition,
+			controlplanev1.ControlPlaneUpgradingCondition,
+			controlplanev1.ResizeSucceededCondition,
+		))
+
 		// Always attempt to update status.
 		if err := r.updateStatus(ctx, kcp, cluster); err != nil {
 			var connFailure *internal.RemoteClusterConnectionError
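For context, the SetSummary call above rolls the three new conditions up into the KubeadmControlPlane's Ready condition. Below is a minimal standalone sketch of that rollup, using the helper and condition names exactly as they appear in this diff (the released conditions package may name the ordering option differently); the import paths, `conditions.Get`, and `clusterv1.ReadyCondition` are assumptions, not part of this change.

```go
package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	kcp := &controlplanev1.KubeadmControlPlane{}

	// Conditions that reconcile() would normally set; hard-coded here for illustration.
	conditions.MarkTrue(kcp, controlplanev1.MachinesReadyCondition)
	conditions.MarkFalse(kcp, controlplanev1.ResizeSucceededCondition,
		controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning,
		"Scaling up to %d replicas (actual %d)", 3, 1)

	// Roll the individual conditions up into the Ready condition,
	// mirroring the SetSummary call added to Reconcile above.
	conditions.SetSummary(kcp, conditions.WithConditionOrder(
		controlplanev1.MachinesReadyCondition,
		controlplanev1.ControlPlaneUpgradingCondition,
		controlplanev1.ResizeSucceededCondition,
	))

	// The summary lands on the Ready condition, e.g. a "1 of x completed" style message.
	if ready := conditions.Get(kcp, clusterv1.ReadyCondition); ready != nil {
		fmt.Println(ready.Status, ready.Reason, ready.Message)
	}
}
```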
@@ -222,6 +230,9 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 		logger.Error(err, "unable to lookup or create cluster certificates")
 		return ctrl.Result{}, err
 	}
+	if certificates.HasGenerated() {
+		conditions.MarkTrue(kcp, controlplanev1.CertificatesGeneratedCondition)
+	}
 
 	// If ControlPlaneEndpoint is not set, return early
 	if cluster.Spec.ControlPlaneEndpoint.IsZero() {
@@ -255,11 +266,23 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	controlPlane := internal.NewControlPlane(cluster, kcp, ownedMachines)
-	requireUpgrade := controlPlane.MachinesNeedingUpgrade()
+
+	// Aggregate the operational state of all the machines; while aggregating we are adding the
+	// source ref to the aggregate reason (reason@machine/name) so the problem can be easily tracked down to its source.
+	conditions.SetAggregateCondition(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef())
+
 	// Upgrade takes precedence over other operations
-	if len(requireUpgrade) > 0 {
+	requireUpgrade := controlPlane.MachinesNeedingUpgrade()
+	switch {
+	case len(requireUpgrade) > 0:
 		logger.Info("Upgrading Control Plane")
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.ControlPlaneUpgradingCondition, controlplanev1.UpgradingReason, clusterv1.ConditionSeverityWarning, "Upgrading to %s version (%d new version, %d old)", kcp.Spec.Version, kcp.Status.UpdatedReplicas, len(requireUpgrade))
 		return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane)
+	default:
+		// make sure last upgrade operation is marked as completed (if any)
+		if conditions.Has(controlPlane.KCP, controlplanev1.ControlPlaneUpgradingCondition) {
+			conditions.MarkTrue(controlPlane.KCP, controlplanev1.ControlPlaneUpgradingCondition)
+		}
 	}
 
 	// If we've made it this far, we can assume that all ownedMachines are up to date
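To illustrate the aggregation added above, the sketch below feeds two machines (one healthy, one not) into the new MachinesReady condition. It assumes SetAggregateCondition accepts a slice of conditions.Getter as it is used in this diff, that Machine objects satisfy the conditions Getter/Setter interfaces on this branch, and that the "NodeNotReady" reason and message are purely hypothetical.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	// Two stand-in machines: one healthy, one reporting a (hypothetical) NodeNotReady reason.
	m1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-a"}}
	m2 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-b"}}
	conditions.MarkTrue(m1, clusterv1.ReadyCondition)
	conditions.MarkFalse(m2, clusterv1.ReadyCondition,
		"NodeNotReady", clusterv1.ConditionSeverityWarning, "kubelet is not ready")

	// Aggregate the machine conditions into the KCP's MachinesReady condition,
	// mirroring the SetAggregateCondition call in reconcile() above.
	kcp := &controlplanev1.KubeadmControlPlane{}
	conditions.SetAggregateCondition(kcp, controlplanev1.MachinesReadyCondition,
		[]conditions.Getter{m1, m2}, conditions.AddSourceRef())

	// With AddSourceRef the aggregated reason carries its source, e.g. "NodeNotReady@machine-b",
	// so the unhealthy machine can be tracked down from the KCP object alone.
	if c := conditions.Get(kcp, controlplanev1.MachinesReadyCondition); c != nil {
		fmt.Println(c.Status, c.Reason, c.Message)
	}
}
```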
@@ -271,16 +294,24 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	case numMachines < desiredReplicas && numMachines == 0:
 		// Create new Machine w/ init
 		logger.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizeSucceededCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up to %d replicas (actual %d)", desiredReplicas, numMachines)
 		return r.initializeControlPlane(ctx, cluster, kcp, controlPlane)
 	// We are scaling up
 	case numMachines < desiredReplicas && numMachines > 0:
 		// Create a new Machine w/ join
 		logger.Info("Scaling up control plane", "Desired", desiredReplicas, "Existing", numMachines)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizeSucceededCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up to %d replicas (actual %d)", desiredReplicas, numMachines)
 		return r.scaleUpControlPlane(ctx, cluster, kcp, controlPlane)
 	// We are scaling down
 	case numMachines > desiredReplicas:
 		logger.Info("Scaling down control plane", "Desired", desiredReplicas, "Existing", numMachines)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizeSucceededCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down to %d replicas (actual %d)", desiredReplicas, numMachines)
 		return r.scaleDownControlPlane(ctx, cluster, kcp, controlPlane)
+	default:
+		// make sure last resize operation is marked as completed (if any)
+		if conditions.Has(controlPlane.KCP, controlplanev1.ResizeSucceededCondition) {
+			conditions.MarkTrue(controlPlane.KCP, controlplanev1.ResizeSucceededCondition)
+		}
 	}
 
 	// Get the workload cluster client.
@@ -328,6 +359,9 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 		return ctrl.Result{}, nil
 	}
 
+	// TODO: consider whether this makes sense, given that the condition appears to go from 3 replicas down to 0 immediately
+	conditions.MarkFalse(kcp, "Ready", "Deleting", clusterv1.ConditionSeverityInfo, "Deleting all replicas (actual %d)", len(ownedMachines))
+
 	// Verify that only control plane machines remain
 	if len(allMachines) != len(ownedMachines) {
 		logger.V(2).Info("Waiting for worker nodes to be deleted first")