@@ -49,6 +49,7 @@ import (
 	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
@@ -161,6 +162,15 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re
 		}
 	}
 
+	// Always update the readyCondition.
+	conditions.SetSummary(kcp,
+		conditions.WithConditions(
+			controlplanev1.MachinesReadyCondition,
+			controlplanev1.AvailableCondition,
+			controlplanev1.CertificatesAvailableCondition,
+		),
+	)
+
 	// Always attempt to update status.
 	if err := r.updateStatus(ctx, kcp, cluster); err != nil {
 		var connFailure *internal.RemoteClusterConnectionError
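For readers unfamiliar with the conditions utilities, here is a minimal sketch of what the new SetSummary call does, assuming the v0.3-era sigs.k8s.io/cluster-api/util/conditions API: it merges the listed condition types into the object's top-level Ready condition, so a single False condition (with its reason and severity) surfaces on Ready. The summarize helper below is hypothetical, not part of the change.

package sketch

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// summarize shows the merge behavior: one False source condition is enough
// to make the Ready summary False as well.
func summarize(kcp *controlplanev1.KubeadmControlPlane) bool {
	conditions.MarkTrue(kcp, controlplanev1.MachinesReadyCondition)
	conditions.MarkTrue(kcp, controlplanev1.AvailableCondition)
	conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition,
		controlplanev1.CertificatesGenerationFailedReason,
		clusterv1.ConditionSeverityWarning, "lookup failed")

	conditions.SetSummary(kcp,
		conditions.WithConditions(
			controlplanev1.MachinesReadyCondition,
			controlplanev1.AvailableCondition,
			controlplanev1.CertificatesAvailableCondition,
		),
	)

	// The summary lands on clusterv1.ReadyCondition ("Ready"); here it is
	// False, carrying the warning-severity reason from the certificates
	// condition.
	return conditions.IsFalse(kcp, clusterv1.ReadyCondition)
}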
@@ -220,8 +230,10 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	controllerRef := metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
 	if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil {
 		logger.Error(err, "unable to lookup or create cluster certificates")
+		conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
 		return ctrl.Result{}, err
 	}
+	conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition)
 
 	// If ControlPlaneEndpoint is not set, return early
 	if cluster.Spec.ControlPlaneEndpoint.IsZero() {
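The MarkFalse/MarkTrue pair above is the standard pattern: record a reason and severity while the step is failing, then flip the condition back to True once it succeeds. As a sketch, assuming the v1alpha3 Condition shape, the failure branch records roughly the following (the certsFailed helper is hypothetical):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
)

// certsFailed shows the fields MarkFalse fills in for the certificate error
// path above; LastTransitionTime is stamped when the status actually flips.
func certsFailed(err error) clusterv1.Condition {
	return clusterv1.Condition{
		Type:     controlplanev1.CertificatesAvailableCondition,
		Status:   corev1.ConditionFalse,
		Severity: clusterv1.ConditionSeverityWarning,
		Reason:   controlplanev1.CertificatesGenerationFailedReason,
		Message:  err.Error(),
	}
}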
@@ -255,6 +267,11 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	controlPlane := internal.NewControlPlane(cluster, kcp, ownedMachines)
+
+	// Aggregate the operational state of all the machines; while aggregating we are adding the
+	// source ref to the aggregate reason (reason@machine/name) so the problem can be easily tracked down to its source.
+	conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef())
+
 	requireUpgrade := controlPlane.MachinesNeedingUpgrade()
 	// Upgrade takes precedence over other operations
 	if len(requireUpgrade) > 0 {
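SetAggregate mirrors the worst Machine condition onto the KCP's MachinesReadyCondition, and conditions.AddSourceRef() suffixes the merged reason with its origin, giving the reason@machine/name format mentioned in the comment. A sketch of what ownedMachines.ConditionGetters() is assumed to provide, with the hypothetical conditionGetters helper standing in for the collection method:

package sketch

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// conditionGetters adapts a set of Machines into the []conditions.Getter
// slice that SetAggregate consumes, one Getter per owned Machine.
func conditionGetters(machines []*clusterv1.Machine) []conditions.Getter {
	getters := make([]conditions.Getter, 0, len(machines))
	for _, m := range machines {
		// *clusterv1.Machine is assumed to satisfy conditions.Getter via
		// its GetConditions() method.
		getters = append(getters, m)
	}
	return getters
}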
@@ -271,6 +288,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	case numMachines < desiredReplicas && numMachines == 0:
 		// Create new Machine w/ init
 		logger.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "")
 		return r.initializeControlPlane(ctx, cluster, kcp, controlPlane)
 	// We are scaling up
 	case numMachines < desiredReplicas && numMachines > 0: