From 74f2b4fa491a2a3aedc6b113a16c5b51d895ef24 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 18 Jun 2020 11:47:20 +0200 Subject: [PATCH] add conditions to kcp --- .../kubeadm/api/v1alpha3/condition_consts.go | 68 +++++++++++++++++++ .../v1alpha3/kubeadm_control_plane_types.go | 13 ++++ .../api/v1alpha3/zz_generated.deepcopy.go | 8 +++ ...cluster.x-k8s.io_kubeadmcontrolplanes.yaml | 44 ++++++++++++ .../kubeadm/controllers/controller.go | 39 +++++++++-- .../kubeadm/controllers/controller_test.go | 6 +- controlplane/kubeadm/controllers/scale.go | 2 +- .../kubeadm/controllers/scale_test.go | 1 + controlplane/kubeadm/controllers/status.go | 20 ++++++ .../kubeadm/controllers/status_test.go | 2 + .../kubeadm/internal/control_plane.go | 7 +- .../kubeadm/internal/control_plane_test.go | 12 ++-- .../kubeadm/internal/machine_collection.go | 11 +++ .../machinefilters/machine_filters.go | 11 +++ 14 files changed, 229 insertions(+), 15 deletions(-) create mode 100644 controlplane/kubeadm/api/v1alpha3/condition_consts.go diff --git a/controlplane/kubeadm/api/v1alpha3/condition_consts.go b/controlplane/kubeadm/api/v1alpha3/condition_consts.go new file mode 100644 index 000000000000..bf512965e6cc --- /dev/null +++ b/controlplane/kubeadm/api/v1alpha3/condition_consts.go @@ -0,0 +1,68 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha3 + +import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + +// Conditions and condition Reasons for the KubeadmControlPlane object + +const ( + // MachinesReady reports an aggregate of current status of the machines controlled by the KubeadmControlPlane. + MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" +) + +const ( + // CertificatesAvailableCondition documents that cluster certificates were generated as part of the + // processing of a KubeadmControlPlane object. + CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + + // CertificatesGenerationFailedReason (Severity=Warning) documents a KubeadmControlPlane controller detecting + // an error while generating certificates; those kinds of errors are usually temporary and the controller + // automatically recovers from them. + CertificatesGenerationFailedReason = "CertificatesGenerationFailed" +) + +const ( + // AvailableCondition documents that the first control plane instance has completed the kubeadm init operation + // and so the control plane is available and an API server instance is ready for processing requests. + AvailableCondition clusterv1.ConditionType = "Available" + + // WaitingForKubeadmInitReason (Severity=Info) documents a KubeadmControlPlane object waiting for the first + // control plane instance to complete the kubeadm init operation. + WaitingForKubeadmInitReason = "WaitingForKubeadmInit" +) + +const ( + // MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the KubeadmControlPlane + // is up to date. When this condition is false, the KubeadmControlPlane is executing a rolling upgrade. + MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate" + + // RollingUpdateInProgressReason (Severity=Warning) documents a KubeadmControlPlane object executing a + // rolling upgrade for aligning the machines spec to the desired state. 
+ RollingUpdateInProgressReason = "RollingUpdateInProgress" +) + +const ( + // ResizedCondition documents a KubeadmControlPlane that is resizing the set of controlled machines. + ResizedCondition clusterv1.ConditionType = "Resized" + + // ScalingUpReason (Severity=Info) documents a KubeadmControlPlane that is increasing the number of replicas. + ScalingUpReason = "ScalingUp" + + // ScalingDownReason (Severity=Info) documents a KubeadmControlPlane that is decreasing the number of replicas. + ScalingDownReason = "ScalingDown" +) diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go index f985772226b3..137162bc90c7 100644 --- a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types.go @@ -19,6 +19,7 @@ package v1alpha3 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" "sigs.k8s.io/cluster-api/errors" @@ -114,6 +115,10 @@ type KubeadmControlPlaneStatus struct { // ObservedGeneration is the latest generation observed by the controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions defines current service state of the KubeadmControlPlane. + // +optional + Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -137,6 +142,14 @@ type KubeadmControlPlane struct { Status KubeadmControlPlaneStatus `json:"status,omitempty"` } +func (in *KubeadmControlPlane) GetConditions() clusterv1.Conditions { + return in.Status.Conditions +} + +func (in *KubeadmControlPlane) SetConditions(conditions clusterv1.Conditions) { + in.Status.Conditions = conditions +} + // +kubebuilder:object:root=true // KubeadmControlPlaneList contains a list of KubeadmControlPlane. 
diff --git a/controlplane/kubeadm/api/v1alpha3/zz_generated.deepcopy.go b/controlplane/kubeadm/api/v1alpha3/zz_generated.deepcopy.go index 14ed532ce3b0..5b1ffc58e057 100644 --- a/controlplane/kubeadm/api/v1alpha3/zz_generated.deepcopy.go +++ b/controlplane/kubeadm/api/v1alpha3/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v1alpha3 import ( "k8s.io/apimachinery/pkg/runtime" + apiv1alpha3 "sigs.k8s.io/cluster-api/api/v1alpha3" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -117,6 +118,13 @@ func (in *KubeadmControlPlaneStatus) DeepCopyInto(out *KubeadmControlPlaneStatus *out = new(string) **out = **in } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(apiv1alpha3.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmControlPlaneStatus. diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml index ad1b756008d3..93e4bf748773 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml @@ -1045,6 +1045,50 @@ spec: status: description: KubeadmControlPlaneStatus defines the observed state of KubeadmControlPlane. properties: + conditions: + description: Conditions defines current service state of the KubeadmControlPlane. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. 
+ If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. The specific API may choose whether or not this + field is considered a guaranteed API. This field may not be + empty. + type: string + severity: + description: Severity provides an explicit classification of + Reason code, so the users or machines can immediately understand + the current situation and act accordingly. The Severity field + MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - status + - type + type: object + type: array failureMessage: description: ErrorMessage indicates that there is a terminal problem reconciling the state, and will be set to a descriptive error message. 
diff --git a/controlplane/kubeadm/controllers/controller.go b/controlplane/kubeadm/controllers/controller.go index be060b708f31..44a7f04d3ff1 100644 --- a/controlplane/kubeadm/controllers/controller.go +++ b/controlplane/kubeadm/controllers/controller.go @@ -49,6 +49,7 @@ import ( capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" @@ -172,6 +173,17 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(req ctrl.Request) (res ctrl.Re } } + // Always update the readyCondition. + conditions.SetSummary(kcp, + conditions.WithConditions( + controlplanev1.MachinesSpecUpToDateCondition, + controlplanev1.ResizedCondition, + controlplanev1.MachinesReadyCondition, + controlplanev1.AvailableCondition, + controlplanev1.CertificatesAvailableCondition, + ), + ) + // Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation. 
if err := patchHelper.Patch(ctx, kcp); err != nil { logger.Error(err, "Failed to patch KubeadmControlPlane") @@ -220,8 +232,10 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * controllerRef := metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")) if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil { logger.Error(err, "unable to lookup or create cluster certificates") + conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } + conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition) // If ControlPlaneEndpoint is not set, return early if cluster.Spec.ControlPlaneEndpoint.IsZero() { @@ -255,11 +269,27 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * } controlPlane := internal.NewControlPlane(cluster, kcp, ownedMachines) - requireUpgrade := controlPlane.MachinesNeedingUpgrade() - // Upgrade takes precedence over other operations - if len(requireUpgrade) > 0 { - logger.Info("Upgrading Control Plane") + + // Aggregate the operational state of all the machines; while aggregating we are adding the + // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. + conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef()) + + // Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations. 
+ needRollout := controlPlane.MachinesNeedingRollout() + switch { + case len(needRollout) > 0: + logger.Info("Rolling out Control Plane machines") + // NOTE: we are using Status.UpdatedReplicas from the previous reconciliation only to provide a meaningful message + // and this does not influence any reconciliation logic. + conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), kcp.Status.UpdatedReplicas) return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane) + default: + // make sure last upgrade operation is marked as completed. + // NOTE: we are checking the condition already exists in order to avoid to set this condition at the first + // reconciliation/before a rolling upgrade actually starts. + if conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) { + conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) + } } // If we've made it this far, we can assume that all ownedMachines are up to date @@ -271,6 +301,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * case numMachines < desiredReplicas && numMachines == 0: // Create new Machine w/ init logger.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines) + conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKubeadmInitReason, clusterv1.ConditionSeverityInfo, "") return r.initializeControlPlane(ctx, cluster, kcp, controlPlane) // We are scaling up case numMachines < desiredReplicas && numMachines > 0: diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go index 2fe0e00c9560..022bdc768949 100644 --- a/controlplane/kubeadm/controllers/controller_test.go +++ 
b/controlplane/kubeadm/controllers/controller_test.go @@ -43,12 +43,13 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/hash" capierrors "sigs.k8s.io/cluster-api/errors" + "sigs.k8s.io/cluster-api/test/helpers" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" @@ -756,6 +757,7 @@ kubernetesVersion: metav1.16.1`, g.Expect(kcp.Status.Selector).NotTo(BeEmpty()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1)) + g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) s, err := secret.GetFromNamespacedName(context.Background(), fakeClient, client.ObjectKey{Namespace: "test", Name: "foo"}, secret.ClusterCA) g.Expect(err).NotTo(HaveOccurred()) @@ -1180,7 +1182,7 @@ func newFakeClient(g *WithT, initObjs ...runtime.Object) client.Client { g.Expect(controlplanev1.AddToScheme(scheme.Scheme)).To(Succeed()) return &fakeClient{ startTime: time.Now(), - Client: fake.NewFakeClientWithScheme(scheme.Scheme, initObjs...), + Client: helpers.NewFakeClientWithScheme(scheme.Scheme, initObjs...), } } diff --git a/controlplane/kubeadm/controllers/scale.go b/controlplane/kubeadm/controllers/scale.go index c0e8f4362d49..cedc48bc6b6c 100644 --- a/controlplane/kubeadm/controllers/scale.go +++ b/controlplane/kubeadm/controllers/scale.go @@ -146,7 +146,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( func selectMachineForScaleDown(controlPlane *internal.ControlPlane) (*clusterv1.Machine, error) { machines := controlPlane.Machines - if needingUpgrade := controlPlane.MachinesNeedingUpgrade(); 
needingUpgrade.Len() > 0 { + if needingUpgrade := controlPlane.MachinesNeedingRollout(); needingUpgrade.Len() > 0 { machines = needingUpgrade } return controlPlane.MachineInFailureDomainWithMostMachines(machines) diff --git a/controlplane/kubeadm/controllers/scale_test.go b/controlplane/kubeadm/controllers/scale_test.go index 72fe8688d5f6..42905d35663b 100644 --- a/controlplane/kubeadm/controllers/scale_test.go +++ b/controlplane/kubeadm/controllers/scale_test.go @@ -184,6 +184,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { endMachines := internal.NewFilterableMachineCollectionFromMachineList(controlPlaneMachines) for _, m := range endMachines { bm, ok := beforeMachines[m.Name] + bm.SetResourceVersion("1") g.Expect(ok).To(BeTrue()) g.Expect(m).To(Equal(bm)) } diff --git a/controlplane/kubeadm/controllers/status.go b/controlplane/kubeadm/controllers/status.go index 6b14342818c0..4bdd6d800a13 100644 --- a/controlplane/kubeadm/controllers/status.go +++ b/controlplane/kubeadm/controllers/status.go @@ -25,6 +25,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/hash" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/machinefilters" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" ) // updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the @@ -44,12 +45,30 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c kcp.Status.UpdatedReplicas = int32(len(currentMachines)) replicas := int32(len(ownedMachines)) + desiredReplicas := *kcp.Spec.Replicas // set basic data that does not require interacting with the workload cluster kcp.Status.Replicas = replicas kcp.Status.ReadyReplicas = 0 kcp.Status.UnavailableReplicas = replicas + switch { + // We are scaling up + case replicas < desiredReplicas: + conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, 
clusterv1.ConditionSeverityWarning, "Scaling up to %d replicas (actual %d)", desiredReplicas, replicas) + // We are scaling down + case replicas > desiredReplicas: + conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down to %d replicas (actual %d)", desiredReplicas, replicas) + default: + // make sure last resize operation is marked as completed. + // NOTE: we are checking the number of machines ready so we report resize completed only when the machines + // are actually provisioned (vs reporting completed immediately after the last machine object is created). + readyMachines := ownedMachines.Filter(machinefilters.IsReady()) + if int32(len(readyMachines)) == replicas { + conditions.MarkTrue(kcp, controlplanev1.ResizedCondition) + } + } + // Return early if the deletion timestamp is set, we don't want to try to connect to the workload cluster. if !kcp.DeletionTimestamp.IsZero() { return nil @@ -69,6 +88,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c // This only gets initialized once and does not change if the kubeadm config map goes away. 
if status.HasKubeadmConfig { kcp.Status.Initialized = true + conditions.MarkTrue(kcp, controlplanev1.AvailableCondition) } if kcp.Status.ReadyReplicas > 0 { diff --git a/controlplane/kubeadm/controllers/status_test.go b/controlplane/kubeadm/controllers/status_test.go index 81dc542e6746..9bd9cdb65c36 100644 --- a/controlplane/kubeadm/controllers/status_test.go +++ b/controlplane/kubeadm/controllers/status_test.go @@ -32,6 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -197,6 +198,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T g.Expect(kcp.Status.FailureMessage).To(BeNil()) g.Expect(kcp.Status.FailureReason).To(BeEquivalentTo("")) g.Expect(kcp.Status.Initialized).To(BeTrue()) + g.Expect(conditions.IsTrue(kcp, controlplanev1.AvailableCondition)).To(BeTrue()) g.Expect(kcp.Status.Ready).To(BeTrue()) } diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index b87bb3ec478a..405d62f278be 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -93,8 +93,11 @@ func (c *ControlPlane) EtcdImageData() (string, string) { return "", "" } -// MachinesNeedingUpgrade return a list of machines that need to be upgraded. -func (c *ControlPlane) MachinesNeedingUpgrade() FilterableMachineCollection { +// MachinesNeedingRollout return a list of machines that need to be rolled out due to configuration changes. +// +// NOTE: Expiration of the spec.UpgradeAfter value forces inclusion of all the machines in this set even if +// no changes have been made to the KubeadmControlPlane. 
+func (c *ControlPlane) MachinesNeedingRollout() FilterableMachineCollection { now := metav1.Now() if c.KCP.Spec.UpgradeAfter != nil && c.KCP.Spec.UpgradeAfter.Before(&now) { return c.Machines.AnyFilter( diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index 5d79cb9a8e58..17adcbfd62a3 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -89,7 +89,7 @@ var _ = Describe("Control Plane", func() { Describe("MachinesNeedingUpgrade", func() { Context("With no machines", func() { It("should return no machines", func() { - Expect(controlPlane.MachinesNeedingUpgrade()).To(HaveLen(0)) + Expect(controlPlane.MachinesNeedingRollout()).To(HaveLen(0)) }) }) @@ -108,7 +108,7 @@ var _ = Describe("Control Plane", func() { controlPlane.Machines.Insert(machine("machine-4", withHash(controlPlane.SpecHash()+"outdated"))) }) It("should return some machines", func() { - Expect(controlPlane.MachinesNeedingUpgrade()).To(HaveLen(1)) + Expect(controlPlane.MachinesNeedingRollout()).To(HaveLen(1)) }) }) @@ -130,7 +130,7 @@ var _ = Describe("Control Plane", func() { Context("That has no upgradeAfter value set", func() { It("should return no machines", func() { - Expect(controlPlane.MachinesNeedingUpgrade()).To(HaveLen(0)) + Expect(controlPlane.MachinesNeedingRollout()).To(HaveLen(0)) }) }) @@ -141,7 +141,7 @@ var _ = Describe("Control Plane", func() { controlPlane.KCP.Spec.UpgradeAfter = &metav1.Time{Time: future} }) It("should return no machines", func() { - Expect(controlPlane.MachinesNeedingUpgrade()).To(HaveLen(0)) + Expect(controlPlane.MachinesNeedingRollout()).To(HaveLen(0)) }) }) @@ -151,7 +151,7 @@ var _ = Describe("Control Plane", func() { controlPlane.KCP.Spec.UpgradeAfter = &metav1.Time{Time: time.Date(year-2, 0, 0, 0, 0, 0, 0, time.UTC)} }) It("should return no machines", func() { - Expect(controlPlane.MachinesNeedingUpgrade()).To(HaveLen(0)) 
+ Expect(controlPlane.MachinesNeedingRollout()).To(HaveLen(0)) }) }) @@ -160,7 +160,7 @@ var _ = Describe("Control Plane", func() { controlPlane.KCP.Spec.UpgradeAfter = &metav1.Time{Time: time.Date(year, 1, 0, 0, 0, 0, 0, time.UTC)} }) It("should return all machines older than this date machines", func() { - Expect(controlPlane.MachinesNeedingUpgrade()).To(HaveLen(2)) + Expect(controlPlane.MachinesNeedingRollout()).To(HaveLen(2)) }) }) }) diff --git a/controlplane/kubeadm/internal/machine_collection.go b/controlplane/kubeadm/internal/machine_collection.go index e425060b940e..c1db81c8b192 100644 --- a/controlplane/kubeadm/internal/machine_collection.go +++ b/controlplane/kubeadm/internal/machine_collection.go @@ -33,6 +33,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/machinefilters" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" ) // FilterableMachineCollection is a set of Machines @@ -134,3 +135,13 @@ func (s FilterableMachineCollection) DeepCopy() FilterableMachineCollection { } return result } + +// ConditionGetters returns the slice with machines converted into conditions.Getter. 
+func (s FilterableMachineCollection) ConditionGetters() []conditions.Getter { + res := make([]conditions.Getter, 0, len(s)) + for _, v := range s { + value := *v + res = append(res, &value) + } + return res +} diff --git a/controlplane/kubeadm/internal/machinefilters/machine_filters.go b/controlplane/kubeadm/internal/machinefilters/machine_filters.go index 5a75db4736ef..69d4e70e6dfd 100644 --- a/controlplane/kubeadm/internal/machinefilters/machine_filters.go +++ b/controlplane/kubeadm/internal/machinefilters/machine_filters.go @@ -20,6 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" + "sigs.k8s.io/cluster-api/util/conditions" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" @@ -148,6 +149,16 @@ func MatchesConfigurationHash(configHash string) Func { } } +// IsReady returns a filter to find all machines with the ReadyCondition equals to True. +func IsReady() Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return conditions.IsTrue(machine, clusterv1.ReadyCondition) + } +} + // OlderThan returns a filter to find all machines // that have a CreationTimestamp earlier than the given time. func OlderThan(t *metav1.Time) Func {