diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go index ad0c43f6e0a0..83a6d011b0e5 100644 --- a/api/v1beta1/cluster_types.go +++ b/api/v1beta1/cluster_types.go @@ -493,6 +493,9 @@ type ClusterSpec struct { // availabilityGates specifies additional conditions to include when evaluating Cluster Available condition. // + // If this field is not defined and the Cluster implements a managed topology, availabilityGates + // from the corresponding ClusterClass will be used, if any. + // // NOTE: this field is considered only for computing v1beta2 conditions. // +optional // +listType=map @@ -594,6 +597,22 @@ type ControlPlaneTopology struct { // +optional NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + // + // This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + // computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + // + // If this field is not defined, readinessGates from the corresponding ControlPlaneClass will be used, if any. + // + // NOTE: This field is considered only for computing v1beta2 conditions. + // NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + // e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + ReadinessGates []MachineReadinessGate `json:"readinessGates,omitempty"` + // variables can be used to customize the ControlPlane through patches. 
// +optional Variables *ControlPlaneVariables `json:"variables,omitempty"` @@ -674,6 +693,20 @@ type MachineDeploymentTopology struct { // +optional MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + // + // This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + // computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + // + // If this field is not defined, readinessGates from the corresponding MachineDeploymentClass will be used, if any. + // + // NOTE: This field is considered only for computing v1beta2 conditions. + // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + ReadinessGates []MachineReadinessGate `json:"readinessGates,omitempty"` + // strategy is the deployment strategy to use to replace existing machines with // new ones. // +optional diff --git a/api/v1beta1/clusterclass_types.go b/api/v1beta1/clusterclass_types.go index e14552c1667e..b637fcc26126 100644 --- a/api/v1beta1/clusterclass_types.go +++ b/api/v1beta1/clusterclass_types.go @@ -81,6 +81,17 @@ type ClusterClass struct { // ClusterClassSpec describes the desired state of the ClusterClass. type ClusterClassSpec struct { + // availabilityGates specifies additional conditions to include when evaluating Cluster Available condition. + // + // NOTE: this field is considered only for computing v1beta2 conditions. + // NOTE: If a Cluster is using this ClusterClass, and this Cluster defines a custom list of availabilityGates, + // such list overrides availabilityGates defined in this field. 
+ // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + AvailabilityGates []ClusterAvailabilityGate `json:"availabilityGates,omitempty"` + // infrastructure is a reference to a provider-specific template that holds // the details for provisioning infrastructure specific cluster // for the underlying provider. @@ -165,6 +176,22 @@ type ControlPlaneClass struct { // NOTE: This value can be overridden while defining a Cluster.Topology. // +optional NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"` + + // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + // + // This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + // computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + // + // NOTE: This field is considered only for computing v1beta2 conditions. + // NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + // such list overrides readinessGates defined in this field. + // NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + // e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + ReadinessGates []MachineReadinessGate `json:"readinessGates,omitempty"` } // ControlPlaneClassNamingStrategy defines the naming strategy for control plane objects. @@ -250,6 +277,20 @@ type MachineDeploymentClass struct { // NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. 
MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + // + // This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + // computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + // + // NOTE: This field is considered only for computing v1beta2 conditions. + // NOTE: If a Cluster defines a custom list of readinessGates for a MachineDeployment using this MachineDeploymentClass, + // such list overrides readinessGates defined in this field. + // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + ReadinessGates []MachineReadinessGate `json:"readinessGates,omitempty"` + // strategy is the deployment strategy to use to replace existing machines with // new ones. // NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 2bbda4d4933f..7b6656d83d1e 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -205,6 +205,11 @@ func (in *ClusterClassPatch) DeepCopy() *ClusterClassPatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterClassSpec) DeepCopyInto(out *ClusterClassSpec) { *out = *in + if in.AvailabilityGates != nil { + in, out := &in.AvailabilityGates, &out.AvailabilityGates + *out = make([]ClusterAvailabilityGate, len(*in)) + copy(*out, *in) + } in.Infrastructure.DeepCopyInto(&out.Infrastructure) in.ControlPlane.DeepCopyInto(&out.ControlPlane) in.Workers.DeepCopyInto(&out.Workers) @@ -682,6 +687,11 @@ func (in *ControlPlaneClass) DeepCopyInto(out *ControlPlaneClass) { *out = new(metav1.Duration) **out = **in } + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]MachineReadinessGate, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneClass. @@ -743,6 +753,11 @@ func (in *ControlPlaneTopology) DeepCopyInto(out *ControlPlaneTopology) { *out = new(metav1.Duration) **out = **in } + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]MachineReadinessGate, len(*in)) + copy(*out, *in) + } if in.Variables != nil { in, out := &in.Variables, &out.Variables *out = new(ControlPlaneVariables) @@ -1212,6 +1227,11 @@ func (in *MachineDeploymentClass) DeepCopyInto(out *MachineDeploymentClass) { *out = new(int32) **out = **in } + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]MachineReadinessGate, len(*in)) + copy(*out, *in) + } if in.Strategy != nil { in, out := &in.Strategy, &out.Strategy *out = new(MachineDeploymentStrategy) @@ -1441,6 +1461,11 @@ func (in *MachineDeploymentTopology) DeepCopyInto(out *MachineDeploymentTopology *out = new(int32) **out = **in } + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]MachineReadinessGate, len(*in)) + copy(*out, *in) + } if in.Strategy != nil { in, out := &in.Strategy, &out.Strategy *out = new(MachineDeploymentStrategy) diff --git 
a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go index ee601f90f5ba..eceda3181e20 100644 --- a/api/v1beta1/zz_generated.openapi.go +++ b/api/v1beta1/zz_generated.openapi.go @@ -412,6 +412,28 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassSpec(ref common.Refere Description: "ClusterClassSpec describes the desired state of the ClusterClass.", Type: []string{"object"}, Properties: map[string]spec.Schema{ + "availabilityGates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "conditionType", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "availabilityGates specifies additional conditions to include when evaluating Cluster Available condition.\n\nNOTE: this field is considered only for computing v1beta2 conditions. NOTE: If a Cluster is using this ClusterClass, and this Cluster defines a custom list of availabilityGates, such list overrides availabilityGates defined in this field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.ClusterAvailabilityGate"), + }, + }, + }, + }, + }, "infrastructure": { SchemaProps: spec.SchemaProps{ Description: "infrastructure is a reference to a provider-specific template that holds the details for provisioning infrastructure specific cluster for the underlying provider. 
The underlying provider is responsible for the implementation of the template to an infrastructure cluster.", @@ -465,7 +487,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterClassSpec(ref common.Refere }, }, Dependencies: []string{ - "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassPatch", "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariable", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClass", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.WorkersClass"}, + "sigs.k8s.io/cluster-api/api/v1beta1.ClusterAvailabilityGate", "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassPatch", "sigs.k8s.io/cluster-api/api/v1beta1.ClusterClassVariable", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClass", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.WorkersClass"}, } } @@ -938,7 +960,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ClusterSpec(ref common.ReferenceCa }, }, SchemaProps: spec.SchemaProps{ - Description: "availabilityGates specifies additional conditions to include when evaluating Cluster Available condition.\n\nNOTE: this field is considered only for computing v1beta2 conditions.", + Description: "availabilityGates specifies additional conditions to include when evaluating Cluster Available condition.\n\nIf this field is not defined and the Cluster implements a managed topology, availabilityGates from the corresponding ClusterClass will be used, if any.\n\nNOTE: this field is considered only for computing v1beta2 conditions.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -1253,12 +1275,34 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneClass(ref common.Refer Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, + "readinessGates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "conditionType", + }, + 
"x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "readinessGates specifies additional conditions to include when evaluating Machine Ready condition.\n\nThis field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine.\n\nNOTE: This field is considered only for computing v1beta2 conditions. NOTE: If a Cluster defines a custom list of readinessGates for the control plane, such list overrides readinessGates defined in this field. NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate"), + }, + }, + }, + }, + }, }, Required: []string{"ref"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, } } @@ -1327,6 +1371,28 @@ func 
schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneTopology(ref common.Re Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, + "readinessGates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "conditionType", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "readinessGates specifies additional conditions to include when evaluating Machine Ready condition.\n\nThis field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine.\n\nIf this field is not defined, readinessGates from the corresponding ControlPlaneClass will be used, if any.\n\nNOTE: This field is considered only for computing v1beta2 conditions. NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; e.g. 
the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate"), + }, + }, + }, + }, + }, "variables": { SchemaProps: spec.SchemaProps{ Description: "variables can be used to customize the ControlPlane through patches.", @@ -1337,7 +1403,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_ControlPlaneTopology(ref common.Re }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneVariables", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.ControlPlaneVariables", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, } } @@ -2051,6 +2117,28 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref common. Format: "int32", }, }, + "readinessGates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "conditionType", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "readinessGates specifies additional conditions to include when evaluating Machine Ready condition.\n\nThis field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine.\n\nNOTE: This field is considered only for computing v1beta2 conditions. 
NOTE: If a Cluster defines a custom list of readinessGates for a MachineDeployment using this MachineDeploymentClass, such list overrides readinessGates defined in this field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate"), + }, + }, + }, + }, + }, "strategy": { SchemaProps: spec.SchemaProps{ Description: "strategy is the deployment strategy to use to replace existing machines with new ones. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass.", @@ -2062,7 +2150,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref common. }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassTemplate", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckClass", "sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate"}, } } @@ -2468,6 +2556,28 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentTopology(ref comm Format: "int32", }, }, + "readinessGates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "conditionType", + }, + "x-kubernetes-list-type": "map", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "readinessGates specifies additional conditions to include when evaluating Machine 
Ready condition.\n\nThis field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine.\n\nIf this field is not defined, readinessGates from the corresponding MachineDeploymentClass will be used, if any.\n\nNOTE: This field is considered only for computing v1beta2 conditions.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate"), + }, + }, + }, + }, + }, "strategy": { SchemaProps: spec.SchemaProps{ Description: "strategy is the deployment strategy to use to replace existing machines with new ones.", @@ -2485,7 +2595,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentTopology(ref comm }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentVariables", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentStrategy", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentVariables", "sigs.k8s.io/cluster-api/api/v1beta1.MachineHealthCheckTopology", "sigs.k8s.io/cluster-api/api/v1beta1.MachineReadinessGate", "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta"}, } } diff --git a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml index d67b02bcba89..d090ec249c14 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusterclasses.yaml @@ -446,6 +446,34 @@ spec: spec: description: ClusterClassSpec 
describes the desired state of the ClusterClass. properties: + availabilityGates: + description: |- + availabilityGates specifies additional conditions to include when evaluating Cluster Available condition. + + NOTE: this field is considered only for computing v1beta2 conditions. + NOTE: If a Cluster is using this ClusterClass, and this Cluster defines a custom list of availabilityGates, + such list overrides availabilityGates defined in this field. + items: + description: ClusterAvailabilityGate contains the type of a Cluster + condition to be used as availability gate. + properties: + conditionType: + description: |- + conditionType refers to a positive polarity condition (status true means good) with matching type in the Cluster's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as availability gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map controlPlane: description: |- controlPlane is a reference to a local struct that holds the details @@ -694,6 +722,39 @@ spec: to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology. type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. 
+ + NOTE: This field is considered only for computing v1beta2 conditions. + NOTE: If a Cluster defines a custom list of readinessGates for the control plane, + such list overrides readinessGates defined in this field. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a positive polarity condition (status true means good) with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map ref: description: |- ref is a required reference to a custom resource @@ -1550,6 +1611,37 @@ spec: to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. NOTE: This value can be overridden while defining a Cluster.Topology using this MachineDeploymentClass. type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. 
+ + NOTE: This field is considered only for computing v1beta2 conditions. + NOTE: If a Cluster defines a custom list of readinessGates for a MachineDeployment using this MachineDeploymentClass, + such list overrides readinessGates defined in this field. + items: + description: MachineReadinessGate contains the type of + a Machine condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a positive polarity condition (status true means good) with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. + maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map strategy: description: |- strategy is the deployment strategy to use to replace existing machines with diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 0ffba4740e2a..65dcd3c7eebe 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -761,6 +761,9 @@ spec: description: |- availabilityGates specifies additional conditions to include when evaluating Cluster Available condition. + If this field is not defined and the Cluster implements a managed topology, availabilityGates + from the corresponding ClusterClass will be used, if any. + NOTE: this field is considered only for computing v1beta2 conditions. 
items: description: ClusterAvailabilityGate contains the type of a Cluster @@ -1129,6 +1132,39 @@ spec: nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + If this field is not defined, readinessGates from the corresponding ControlPlaneClass will be used, if any. + + NOTE: This field is considered only for computing v1beta2 conditions. + NOTE: Specific control plane provider implementations might automatically extend the list of readinessGates; + e.g. the kubeadm control provider adds ReadinessGates for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + items: + description: MachineReadinessGate contains the type of a + Machine condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a positive polarity condition (status true means good) with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. 
+ maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map replicas: description: |- replicas is the number of control plane nodes. @@ -1440,6 +1476,38 @@ spec: nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + If this field is not defined, readinessGates from the corresponding MachineDeploymentClass will be used, if any. + + NOTE: This field is considered only for computing v1beta2 conditions. + items: + description: MachineReadinessGate contains the type + of a Machine condition to be used as a readiness + gate. + properties: + conditionType: + description: |- + conditionType refers to a positive polarity condition (status true means good) with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. 
+ maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map replicas: description: |- replicas is the number of worker nodes belonging to this set. diff --git a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go index 58d20cae96f0..dfca0ecc773d 100644 --- a/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go +++ b/controlplane/kubeadm/api/v1beta1/kubeadm_control_plane_types.go @@ -142,6 +142,21 @@ type KubeadmControlPlaneMachineTemplate struct { // offered by an infrastructure provider. InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"` + // readinessGates specifies additional conditions to include when evaluating Machine Ready condition; + // KubeadmControlPlane will always add readinessGates for the condition it is setting on the Machine: + // APIServerPodHealthy, SchedulerPodHealthy, ControllerManagerPodHealthy, and if etcd is managed by KCP also + // EtcdPodHealthy, EtcdMemberHealthy. + // + // This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + // computation a condition, managed by an external controller, reporting the status of special software/hardware installed on the Machine. + // + // NOTE: This field is considered only for computing v1beta2 conditions. 
+ // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + ReadinessGates []clusterv1.MachineReadinessGate `json:"readinessGates,omitempty"` + // nodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node // The default value is 0, meaning that the node can be drained without any time limitations. // NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` diff --git a/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go b/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go index 46cd8278d466..b6d41c20e602 100644 --- a/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go +++ b/controlplane/kubeadm/api/v1beta1/zz_generated.deepcopy.go @@ -91,6 +91,11 @@ func (in *KubeadmControlPlaneMachineTemplate) DeepCopyInto(out *KubeadmControlPl *out = *in in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.InfrastructureRef = in.InfrastructureRef + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]apiv1beta1.MachineReadinessGate, len(*in)) + copy(*out, *in) + } if in.NodeDrainTimeout != nil { in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout *out = new(v1.Duration) diff --git a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml index ec5031ee9fce..3ce0cb678470 100644 --- a/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml +++ b/controlplane/kubeadm/config/crd/bases/controlplane.cluster.x-k8s.io_kubeadmcontrolplanes.yaml @@ -4300,6 +4300,38 @@ spec: nodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations. 
type: string + readinessGates: + description: |- + readinessGates specifies additional conditions to include when evaluating Machine Ready condition; + KubeadmControlPlane will always add readinessGates for the condition it is setting on the Machine: + APIServerPodHealthy, SchedulerPodHealthy, ControllerManagerPodHealthy, and if etcd is managed by CKP also + EtcdPodHealthy, EtcdMemberHealthy. + + This field can be used e.g. to instruct the machine controller to include in the computation for Machine's ready + computation a condition, managed by an external controllers, reporting the status of special software/hardware installed on the Machine. + + NOTE: This field is considered only for computing v1beta2 conditions. + items: + description: MachineReadinessGate contains the type of a Machine + condition to be used as a readiness gate. + properties: + conditionType: + description: |- + conditionType refers to a positive polarity condition (status true means good) with matching type in the Machine's condition list. + If the conditions doesn't exist, it will be treated as unknown. + Note: Both Cluster API conditions or conditions added by 3rd party controllers can be used as readiness gates. 
+ maxLength: 316 + minLength: 1 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - conditionType + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - conditionType + x-kubernetes-list-type: map required: - infrastructureRef type: object diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 09386ff2c79a..029e0cbbd9c7 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -52,6 +53,10 @@ var mandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ {ConditionType: string(controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition)}, {ConditionType: string(controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition)}, {ConditionType: string(controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition)}, +} + +// etcdMandatoryMachineReadinessGates are readinessGates KCP enforces to be set on machine it owns if etcd is managed. 
+var etcdMandatoryMachineReadinessGates = []clusterv1.MachineReadinessGate{ {ConditionType: string(controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition)}, {ConditionType: string(controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition)}, } @@ -464,29 +469,26 @@ func (r *KubeadmControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev if existingMachine != nil { desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef - desiredMachine.Spec.ReadinessGates = existingMachine.Spec.ReadinessGates } - ensureMandatoryReadinessGates(desiredMachine) - - return desiredMachine, nil -} -func ensureMandatoryReadinessGates(m *clusterv1.Machine) { - if m.Spec.ReadinessGates == nil { - m.Spec.ReadinessGates = mandatoryMachineReadinessGates - return + // Set machines readiness gates + allReadinessGates := []clusterv1.MachineReadinessGate{} + allReadinessGates = append(allReadinessGates, mandatoryMachineReadinessGates...) + isEtcdManaged := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration == nil || kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External == nil + if isEtcdManaged { + allReadinessGates = append(allReadinessGates, etcdMandatoryMachineReadinessGates...) } + allReadinessGates = append(allReadinessGates, kcp.Spec.MachineTemplate.ReadinessGates...) 
- for _, want := range mandatoryMachineReadinessGates { - found := false - for _, got := range m.Spec.ReadinessGates { - if got.ConditionType == want.ConditionType { - found = true - break - } - } - if !found { - m.Spec.ReadinessGates = append(m.Spec.ReadinessGates, want) + desiredMachine.Spec.ReadinessGates = []clusterv1.MachineReadinessGate{} + knownGates := sets.Set[string]{} + for _, gate := range allReadinessGates { + if knownGates.Has(gate.ConditionType) { + continue } + desiredMachine.Spec.ReadinessGates = append(desiredMachine.Spec.ReadinessGates, gate) + knownGates.Insert(gate.ConditionType) } + + return desiredMachine, nil } diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index d6bde7dc3f61..4c999befdf8a 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -530,7 +530,10 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ - ObjectMeta: kcpMachineTemplateObjectMeta, + ObjectMeta: kcpMachineTemplateObjectMeta, + ReadinessGates: []clusterv1.MachineReadinessGate{ + {ConditionType: "Foo"}, + }, NodeDrainTimeout: duration5s, NodeDeletionTimeout: duration5s, NodeVolumeDetachTimeout: duration5s, @@ -669,6 +672,32 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { Not(HaveSuffix("00000")), }, }, + { + name: "should return the correct Machine object when creating a new Machine with additional kcp readinessGates", + kcp: &controlplanev1.KubeadmControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: kcpName, + Namespace: cluster.Namespace, + }, + Spec: controlplanev1.KubeadmControlPlaneSpec{ + Version: "v1.16.6", + MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ + ObjectMeta: 
kcpMachineTemplateObjectMeta, + ReadinessGates: []clusterv1.MachineReadinessGate{{ConditionType: "Bar"}}, + NodeDrainTimeout: duration5s, + NodeDeletionTimeout: duration5s, + NodeVolumeDetachTimeout: duration5s, + }, + KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ + ClusterName: clusterName, + }, + }, + }, + }, + isUpdatingExistingMachine: false, + wantErr: false, + }, { name: "should return the correct Machine object when updating an existing Machine", kcp: &controlplanev1.KubeadmControlPlane{ @@ -683,6 +712,9 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { NodeDrainTimeout: duration5s, NodeDeletionTimeout: duration5s, NodeVolumeDetachTimeout: duration5s, + ReadinessGates: []clusterv1.MachineReadinessGate{ + {ConditionType: "Foo"}, + }, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ @@ -758,7 +790,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { NodeDrainTimeout: tt.kcp.Spec.MachineTemplate.NodeDrainTimeout, NodeDeletionTimeout: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeout, NodeVolumeDetachTimeout: tt.kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout, - ReadinessGates: append([]clusterv1.MachineReadinessGate{{ConditionType: "Foo"}}, mandatoryMachineReadinessGates...), + ReadinessGates: append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.ReadinessGates...), } // Verify the Name and UID of the Machine remain unchanged @@ -792,7 +824,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { NodeDrainTimeout: tt.kcp.Spec.MachineTemplate.NodeDrainTimeout, NodeDeletionTimeout: tt.kcp.Spec.MachineTemplate.NodeDeletionTimeout, NodeVolumeDetachTimeout: tt.kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout, - ReadinessGates: mandatoryMachineReadinessGates, + ReadinessGates: 
append(append(mandatoryMachineReadinessGates, etcdMandatoryMachineReadinessGates...), tt.kcp.Spec.MachineTemplate.ReadinessGates...), } // Verify Name. for _, matcher := range tt.want { diff --git a/docs/book/src/developer/providers/contracts/control-plane.md b/docs/book/src/developer/providers/contracts/control-plane.md index 40f4617ff445..bd681d3ab00e 100644 --- a/docs/book/src/developer/providers/contracts/control-plane.md +++ b/docs/book/src/developer/providers/contracts/control-plane.md @@ -463,7 +463,37 @@ Please note that some of the above fields (`metadata`, `nodeDrainTimeout`, `node must be propagated to machines without triggering rollouts. See [In place propagation of changes affecting Kubernetes objects only] as well as [Metadata propagation] for more details. -Additionally, in case you are developing a control plane provider where control plane instances uses a Cluster API Machine +In case you are developing a control plane provider that allows definition of machine readiness gates, you SHOULD also implement +the following `machineTemplate` field. + +```go +type FooControlPlaneMachineTemplate struct { + // readinessGates specifies additional conditions to include when evaluating Machine Ready condition. + // + // This field can be used e.g. by Cluster API control plane providers to extend the semantic of the + // Ready condition for the Machine they control, like the kubeadm control provider adding ReadinessGates + // for the APIServerPodHealthy, SchedulerPodHealthy conditions, etc. + // + // Another example are external controllers, e.g. responsible to install special software/hardware on the Machines; + // they can include the status of those components with a new condition and add this condition to ReadinessGates. + // + // NOTE: This field is considered only for computing v1beta2 conditions. 
+ // NOTE: In case readinessGates conditions start with the APIServer, ControllerManager, Scheduler prefix, and all those + // readiness gates condition are reporting the same message, when computing the Machine's Ready condition those + // readinessGates will be replaced by a single entry reporting "Control plane components: " + message. + // This helps to improve readability of conditions bubbling up to the Machine's owner resource / to the Cluster). + // +optional + // +listType=map + // +listMapKey=conditionType + // +kubebuilder:validation:MaxItems=32 + ReadinessGates []clusterv1.MachineReadinessGate `json:"readinessGates,omitempty"` + + // See other rules for more details about mandatory/optional fields in ControlPlane spec. + // Other fields SHOULD be added based on the needs of your provider. +} +``` + +In case you are developing a control plane provider where control plane instances uses a Cluster API Machine object to represent each control plane instance, but those instances do not show up as a Kubernetes node (for example, managed control plane providers for AKS, EKS, GKE etc), you SHOULD also implement the following `status` field. diff --git a/docs/book/src/developer/providers/migrations/v1.9-to-v1.10.md b/docs/book/src/developer/providers/migrations/v1.9-to-v1.10.md index 3442d7462302..70ced693614b 100644 --- a/docs/book/src/developer/providers/migrations/v1.9-to-v1.10.md +++ b/docs/book/src/developer/providers/migrations/v1.9-to-v1.10.md @@ -23,4 +23,6 @@ maintainers of providers and consumers of our Go API. - `E2EConfig.GetInt32PtrVariable` is now `E2EConfig.MustGetInt32PtrVariable` - Using the package `sigs.k8s.io/cluster-api/controllers/clustercache` in tests using envtest may require a change to properly shutdown a running clustercache. Otherwise teardown of envtest might time out and lead to failed tests. 
(xref [#11757](https://github.com/kubernetes-sigs/cluster-api/pull/11757)) -### Suggested changes for providers \ No newline at end of file +### Suggested changes for providers + +- If you are developing a control plane provider with support for machines, please consider adding `spec.machineTemplate.readinessGates` (see [contract](../contracts/control-plane.md#controlplane-machines)) diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go index 785273281812..a96dc5b13ec3 100644 --- a/exp/topology/desiredstate/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -311,23 +311,23 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf if s.Current.ControlPlane.Object != nil { currentRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Current.ControlPlane.Object) if err != nil { - return nil, errors.Wrapf(err, "failed get spec.machineTemplate.infrastructureRef from the ControlPlane object") + return nil, errors.Wrapf(err, "failed get %s from the ControlPlane object", contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()) } desiredRef, err := calculateRefDesiredAPIVersion(currentRef, refCopy) if err != nil { - return nil, errors.Wrap(err, "failed to calculate desired spec.machineTemplate.infrastructureRef") + return nil, errors.Wrapf(err, "failed to calculate desired %s", contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()) } refCopy.SetAPIVersion(desiredRef.APIVersion) } if err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, refCopy); err != nil { - return nil, errors.Wrap(err, "failed to spec.machineTemplate.infrastructureRef in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()) } // Add the ControlPlane labels and annotations to the ControlPlane machines as well. 
// Note: We have to ensure the machine template metadata copied from the control plane template is not overwritten. controlPlaneMachineTemplateMetadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(controlPlane) if err != nil { - return nil, errors.Wrap(err, "failed to get spec.machineTemplate.metadata from the ControlPlane object") + return nil, errors.Wrapf(err, "failed to get %s from the ControlPlane object", contract.ControlPlane().MachineTemplate().Metadata().Path()) } controlPlaneMachineTemplateMetadata.Labels = util.MergeMap(controlPlaneLabels, controlPlaneMachineTemplateMetadata.Labels) @@ -338,7 +338,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf Labels: controlPlaneMachineTemplateMetadata.Labels, Annotations: controlPlaneMachineTemplateMetadata.Annotations, }); err != nil { - return nil, errors.Wrap(err, "failed to set spec.machineTemplate.metadata in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().Metadata().Path()) } } @@ -347,7 +347,20 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf // does not implement support for this field and the ControlPlane object is generated without the number of Replicas. if s.Blueprint.Topology.ControlPlane.Replicas != nil { if err := contract.ControlPlane().Replicas().Set(controlPlane, int64(*s.Blueprint.Topology.ControlPlane.Replicas)); err != nil { - return nil, errors.Wrap(err, "failed to set spec.replicas in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().Replicas().Path()) + } + } + + // If it is required to manage the readinessGates for the control plane, set the corresponding field. 
+ // NOTE: If readinessGates value from both Cluster and ClusterClass is nil, it is assumed that the control plane controller + // does not implement support for this field and the ControlPlane object is generated without readinessGates. + if s.Blueprint.Topology.ControlPlane.ReadinessGates != nil { + if err := contract.ControlPlane().MachineTemplate().ReadinessGates().Set(controlPlane, s.Blueprint.Topology.ControlPlane.ReadinessGates); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates().Path()) + } + } else if s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates != nil { + if err := contract.ControlPlane().MachineTemplate().ReadinessGates().Set(controlPlane, s.Blueprint.ClusterClass.Spec.ControlPlane.ReadinessGates); err != nil { + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().ReadinessGates().Path()) } } @@ -358,7 +371,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } if nodeDrainTimeout != nil { if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(controlPlane, *nodeDrainTimeout); err != nil { - return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDrainTimeout in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()) } } @@ -369,7 +382,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } if nodeVolumeDetachTimeout != nil { if err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Set(controlPlane, *nodeVolumeDetachTimeout); err != nil { - return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeVolumeDetachTimeout in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", 
contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()) } } @@ -380,7 +393,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf } if nodeDeletionTimeout != nil { if err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Set(controlPlane, *nodeDeletionTimeout); err != nil { - return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDeletionTimeout in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()) } } @@ -390,7 +403,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf return nil, errors.Wrap(err, "failed to compute version of control plane") } if err := contract.ControlPlane().Version().Set(controlPlane, version); err != nil { - return nil, errors.Wrap(err, "failed to set spec.version in the ControlPlane object") + return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().Version().Path()) } return controlPlane, nil @@ -732,6 +745,11 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope nodeDeletionTimeout = machineDeploymentTopology.NodeDeletionTimeout } + readinessGates := machineDeploymentClass.ReadinessGates + if machineDeploymentTopology.ReadinessGates != nil { + readinessGates = machineDeploymentTopology.ReadinessGates + } + // Compute the MachineDeployment object. 
desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachineDeployment.BootstrapTemplate) if err != nil { @@ -775,6 +793,7 @@ func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope NodeDrainTimeout: nodeDrainTimeout, NodeVolumeDetachTimeout: nodeVolumeDetachTimeout, NodeDeletionTimeout: nodeDeletionTimeout, + ReadinessGates: readinessGates, }, }, }, diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go index eb2077a0f400..a7a150d43937 100644 --- a/exp/topology/desiredstate/desired_state_test.go +++ b/exp/topology/desiredstate/desired_state_test.go @@ -17,6 +17,7 @@ limitations under the License. package desiredstate import ( + "encoding/json" "strings" "testing" "time" @@ -295,6 +296,8 @@ func TestComputeControlPlaneInfrastructureMachineTemplate(t *testing.T) { } func TestComputeControlPlane(t *testing.T) { + g := NewWithT(t) + // templates and ClusterClass labels := map[string]string{"l1": ""} annotations := map[string]string{"a1": ""} @@ -313,8 +316,12 @@ func TestComputeControlPlane(t *testing.T) { Annotations: controlPlaneMachineTemplateAnnotations, }) clusterClassDuration := 20 * time.Second + clusterClassReadinessGates := []clusterv1.MachineReadinessGate{ + {ConditionType: "foo"}, + } clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1"). WithControlPlaneMetadata(labels, annotations). + WithControlPlaneReadinessGates(clusterClassReadinessGates). WithControlPlaneTemplate(controlPlaneTemplate). WithControlPlaneNodeDrainTimeout(&metav1.Duration{Duration: clusterClassDuration}). WithControlPlaneNodeVolumeDetachTimeout(&metav1.Duration{Duration: clusterClassDuration}). 
@@ -328,6 +335,10 @@ func TestComputeControlPlane(t *testing.T) { nodeDrainTimeout := metav1.Duration{Duration: topologyDuration} nodeVolumeDetachTimeout := metav1.Duration{Duration: topologyDuration} nodeDeletionTimeout := metav1.Duration{Duration: topologyDuration} + readinessGates := []clusterv1.MachineReadinessGate{ + {ConditionType: "foo"}, + {ConditionType: "bar"}, + } cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", @@ -341,6 +352,7 @@ func TestComputeControlPlane(t *testing.T) { Labels: map[string]string{"l2": ""}, Annotations: map[string]string{"a2": ""}, }, + ReadinessGates: readinessGates, Replicas: &replicas, NodeDrainTimeout: &nodeDrainTimeout, NodeVolumeDetachTimeout: &nodeVolumeDetachTimeout, @@ -350,6 +362,15 @@ func TestComputeControlPlane(t *testing.T) { }, } + jsonValue, err := json.Marshal(&clusterClassReadinessGates) + g.Expect(err).ToNot(HaveOccurred()) + var expectedClusterClassReadinessGates []interface{} + g.Expect(json.Unmarshal(jsonValue, &expectedClusterClassReadinessGates)).ToNot(HaveOccurred()) + jsonValue, err = json.Marshal(&readinessGates) + g.Expect(err).ToNot(HaveOccurred()) + var expectedReadinessGates []interface{} + g.Expect(json.Unmarshal(jsonValue, &expectedReadinessGates)).ToNot(HaveOccurred()) + t.Run("Generates the ControlPlane from the template", func(t *testing.T) { g := NewWithT(t) @@ -381,6 +402,7 @@ func TestComputeControlPlane(t *testing.T) { assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...) assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...) + assertNestedField(g, obj, expectedReadinessGates, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...) assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...) 
assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...) @@ -406,7 +428,7 @@ func TestComputeControlPlane(t *testing.T) { Annotations: map[string]string{"a2": ""}, }, Replicas: &replicas, - // no values for NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout + // no values for ReadinessGates, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout }, }, }, @@ -429,6 +451,7 @@ func TestComputeControlPlane(t *testing.T) { g.Expect(obj).ToNot(BeNil()) // checking only values from CC defaults + assertNestedField(g, obj, expectedClusterClassReadinessGates, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...) assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...) assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...) @@ -470,6 +493,33 @@ func TestComputeControlPlane(t *testing.T) { assertNestedFieldUnset(g, obj, contract.ControlPlane().Replicas().Path()...) assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...) 
}) + t.Run("Skips setting readinessGates if not set in Cluster and ClusterClass", func(t *testing.T) { + g := NewWithT(t) + + clusterClassWithoutReadinessGates := clusterClass.DeepCopy() + clusterClassWithoutReadinessGates.Spec.ControlPlane.ReadinessGates = nil + + clusterWithoutReadinessGates := cluster.DeepCopy() + clusterWithoutReadinessGates.Spec.Topology.ControlPlane.ReadinessGates = nil + + blueprint := &scope.ClusterBlueprint{ + Topology: clusterWithoutReadinessGates.Spec.Topology, + ClusterClass: clusterClassWithoutReadinessGates, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplate, + }, + } + + // aggregating current cluster objects into ClusterState (simulating getCurrentState) + scope := scope.New(clusterWithoutReadinessGates) + scope.Blueprint = blueprint + + obj, err := (&generator{}).computeControlPlane(ctx, scope, nil) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj).ToNot(BeNil()) + + assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().ReadinessGates().Path()...) + }) t.Run("Generates the ControlPlane from the template and adds the infrastructure machine template if required", func(t *testing.T) { g := NewWithT(t) @@ -1366,6 +1416,9 @@ func TestComputeMachineDeployment(t *testing.T) { MaxInFlight: ptr.To(intstr.FromInt32(5)), }, } + clusterClassReadinessGates := []clusterv1.MachineReadinessGate{ + {ConditionType: "foo"}, + } md1 := builder.MachineDeploymentClass("linux-worker"). WithLabels(labels). WithAnnotations(annotations). @@ -1375,6 +1428,7 @@ func TestComputeMachineDeployment(t *testing.T) { UnhealthyConditions: unhealthyConditions, NodeStartupTimeout: nodeTimeoutDuration, }). + WithReadinessGates(clusterClassReadinessGates). WithFailureDomain(&clusterClassFailureDomain). WithNodeDrainTimeout(&clusterClassDuration). WithNodeVolumeDetachTimeout(&clusterClassDuration). 
@@ -1431,6 +1485,10 @@ func TestComputeMachineDeployment(t *testing.T) { MaxInFlight: ptr.To(intstr.FromInt32(5)), }, } + readinessGates := []clusterv1.MachineReadinessGate{ + {ConditionType: "foo"}, + {ConditionType: "bar"}, + } mdTopology := clusterv1.MachineDeploymentTopology{ Metadata: clusterv1.ObjectMeta{ Labels: map[string]string{ @@ -1449,6 +1507,7 @@ func TestComputeMachineDeployment(t *testing.T) { Name: "big-pool-of-machines", Replicas: &replicas, FailureDomain: &topologyFailureDomain, + ReadinessGates: readinessGates, NodeDrainTimeout: &topologyDuration, NodeVolumeDetachTimeout: &topologyDuration, NodeDeletionTimeout: &topologyDuration, @@ -1488,6 +1547,7 @@ func TestComputeMachineDeployment(t *testing.T) { g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration)) + g.Expect(actualMd.Spec.Template.Spec.ReadinessGates).To(Equal(readinessGates)) g.Expect(actualMd.Spec.ClusterName).To(Equal("cluster1")) g.Expect(actualMd.Name).To(ContainSubstring("cluster1")) g.Expect(actualMd.Name).To(ContainSubstring("big-pool-of-machines")) @@ -1529,7 +1589,7 @@ func TestComputeMachineDeployment(t *testing.T) { Class: "linux-worker", Name: "big-pool-of-machines", Replicas: &replicas, - // missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy + // missing ReadinessGates, FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy } e := generator{} @@ -1542,11 +1602,62 @@ func TestComputeMachineDeployment(t *testing.T) { g.Expect(*actualMd.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds)) g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(clusterClassStrategy)) g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(clusterClassFailureDomain)) + 
g.Expect(actualMd.Spec.Template.Spec.ReadinessGates).To(Equal(clusterClassReadinessGates)) g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration)) g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration)) }) + t.Run("Skips setting readinessGates if not set in Cluster and ClusterClass", func(t *testing.T) { + g := NewWithT(t) + + clusterClassWithoutReadinessGates := fakeClass.DeepCopy() + clusterClassWithoutReadinessGates.Spec.Workers.MachineDeployments[0].ReadinessGates = nil + + blueprint := &scope.ClusterBlueprint{ + Topology: cluster.Spec.Topology, + ClusterClass: clusterClassWithoutReadinessGates, + MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{ + "linux-worker": { + Metadata: clusterv1.ObjectMeta{ + Labels: labels, + Annotations: annotations, + }, + BootstrapTemplate: workerBootstrapTemplate, + InfrastructureMachineTemplate: workerInfrastructureMachineTemplate, + MachineHealthCheck: &clusterv1.MachineHealthCheckClass{ + UnhealthyConditions: unhealthyConditions, + NodeStartupTimeout: &metav1.Duration{ + Duration: time.Duration(1), + }, + }, + }, + }, + } + + scope := scope.New(cluster) + scope.Blueprint = blueprint + + mdTopology := clusterv1.MachineDeploymentTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "baz"}, + }, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + // missing ReadinessGates + } + + e := generator{} + + actual, err := e.computeMachineDeployment(ctx, scope, mdTopology) + g.Expect(err).ToNot(HaveOccurred()) + + // checking only values from CC defaults + actualMd := actual.Object + g.Expect(actualMd.Spec.Template.Spec.ReadinessGates).To(BeNil()) + }) + t.Run("If there is already a machine deployment, it preserves the object name and the reference names", func(t *testing.T) { g := NewWithT(t) s := 
scope.New(cluster) diff --git a/internal/apis/controlplane/kubeadm/v1alpha3/conversion.go b/internal/apis/controlplane/kubeadm/v1alpha3/conversion.go index 0d380c2ec681..60c801036ad3 100644 --- a/internal/apis/controlplane/kubeadm/v1alpha3/conversion.go +++ b/internal/apis/controlplane/kubeadm/v1alpha3/conversion.go @@ -40,6 +40,7 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.MachineTemplate.ObjectMeta = restored.Spec.MachineTemplate.ObjectMeta + dst.Spec.MachineTemplate.ReadinessGates = restored.Spec.MachineTemplate.ReadinessGates dst.Spec.MachineTemplate.NodeDeletionTimeout = restored.Spec.MachineTemplate.NodeDeletionTimeout dst.Spec.MachineTemplate.NodeVolumeDetachTimeout = restored.Spec.MachineTemplate.NodeVolumeDetachTimeout dst.Spec.RolloutBefore = restored.Spec.RolloutBefore diff --git a/internal/apis/controlplane/kubeadm/v1alpha4/conversion.go b/internal/apis/controlplane/kubeadm/v1alpha4/conversion.go index b65d362842a2..e3e441d18b14 100644 --- a/internal/apis/controlplane/kubeadm/v1alpha4/conversion.go +++ b/internal/apis/controlplane/kubeadm/v1alpha4/conversion.go @@ -42,6 +42,7 @@ func (src *KubeadmControlPlane) ConvertTo(dstRaw conversion.Hub) error { return err } + dst.Spec.MachineTemplate.ReadinessGates = restored.Spec.MachineTemplate.ReadinessGates dst.Spec.MachineTemplate.NodeDeletionTimeout = restored.Spec.MachineTemplate.NodeDeletionTimeout dst.Spec.MachineTemplate.NodeVolumeDetachTimeout = restored.Spec.MachineTemplate.NodeVolumeDetachTimeout dst.Spec.RolloutBefore = restored.Spec.RolloutBefore diff --git a/internal/apis/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go b/internal/apis/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go index 9380fbfe8a3d..b3a5886321a0 100644 --- a/internal/apis/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go +++ b/internal/apis/controlplane/kubeadm/v1alpha4/zz_generated.conversion.go @@ -279,6 +279,7 @@ func 
autoConvert_v1beta1_KubeadmControlPlaneMachineTemplate_To_v1alpha4_KubeadmC return err } out.InfrastructureRef = in.InfrastructureRef + // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type out.NodeDrainTimeout = (*v1.Duration)(unsafe.Pointer(in.NodeDrainTimeout)) // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type diff --git a/internal/apis/core/v1alpha4/conversion.go b/internal/apis/core/v1alpha4/conversion.go index 52989c8d8c39..f006f2895a65 100644 --- a/internal/apis/core/v1alpha4/conversion.go +++ b/internal/apis/core/v1alpha4/conversion.go @@ -61,6 +61,7 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { if restored.Spec.Topology.ControlPlane.NodeDeletionTimeout != nil { dst.Spec.Topology.ControlPlane.NodeDeletionTimeout = restored.Spec.Topology.ControlPlane.NodeDeletionTimeout } + dst.Spec.Topology.ControlPlane.ReadinessGates = restored.Spec.Topology.ControlPlane.ReadinessGates if restored.Spec.Topology.Workers != nil { if dst.Spec.Topology.Workers == nil { @@ -69,6 +70,7 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { for i := range restored.Spec.Topology.Workers.MachineDeployments { dst.Spec.Topology.Workers.MachineDeployments[i].FailureDomain = restored.Spec.Topology.Workers.MachineDeployments[i].FailureDomain dst.Spec.Topology.Workers.MachineDeployments[i].Variables = restored.Spec.Topology.Workers.MachineDeployments[i].Variables + dst.Spec.Topology.Workers.MachineDeployments[i].ReadinessGates = restored.Spec.Topology.Workers.MachineDeployments[i].ReadinessGates dst.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeout dst.Spec.Topology.Workers.MachineDeployments[i].NodeVolumeDetachTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeVolumeDetachTimeout 
dst.Spec.Topology.Workers.MachineDeployments[i].NodeDeletionTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDeletionTimeout @@ -127,7 +129,9 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Patches = restored.Spec.Patches dst.Spec.Variables = restored.Spec.Variables + dst.Spec.AvailabilityGates = restored.Spec.AvailabilityGates dst.Spec.ControlPlane.MachineHealthCheck = restored.Spec.ControlPlane.MachineHealthCheck + dst.Spec.ControlPlane.ReadinessGates = restored.Spec.ControlPlane.ReadinessGates dst.Spec.ControlPlane.NamingStrategy = restored.Spec.ControlPlane.NamingStrategy dst.Spec.ControlPlane.NodeDrainTimeout = restored.Spec.ControlPlane.NodeDrainTimeout dst.Spec.ControlPlane.NodeVolumeDetachTimeout = restored.Spec.ControlPlane.NodeVolumeDetachTimeout @@ -136,6 +140,7 @@ func (src *ClusterClass) ConvertTo(dstRaw conversion.Hub) error { for i := range restored.Spec.Workers.MachineDeployments { dst.Spec.Workers.MachineDeployments[i].MachineHealthCheck = restored.Spec.Workers.MachineDeployments[i].MachineHealthCheck + dst.Spec.Workers.MachineDeployments[i].ReadinessGates = restored.Spec.Workers.MachineDeployments[i].ReadinessGates dst.Spec.Workers.MachineDeployments[i].FailureDomain = restored.Spec.Workers.MachineDeployments[i].FailureDomain dst.Spec.Workers.MachineDeployments[i].NamingStrategy = restored.Spec.Workers.MachineDeployments[i].NamingStrategy dst.Spec.Workers.MachineDeployments[i].NodeDrainTimeout = restored.Spec.Workers.MachineDeployments[i].NodeDrainTimeout diff --git a/internal/apis/core/v1alpha4/zz_generated.conversion.go b/internal/apis/core/v1alpha4/zz_generated.conversion.go index 99c076a83272..ca7076623f9e 100644 --- a/internal/apis/core/v1alpha4/zz_generated.conversion.go +++ b/internal/apis/core/v1alpha4/zz_generated.conversion.go @@ -642,6 +642,7 @@ func Convert_v1alpha4_ClusterClassSpec_To_v1beta1_ClusterClassSpec(in *ClusterCl } func 
autoConvert_v1beta1_ClusterClassSpec_To_v1alpha4_ClusterClassSpec(in *v1beta1.ClusterClassSpec, out *ClusterClassSpec, s conversion.Scope) error { + // WARNING: in.AvailabilityGates requires manual conversion: does not exist in peer-type if err := Convert_v1beta1_LocalObjectTemplate_To_v1alpha4_LocalObjectTemplate(&in.Infrastructure, &out.Infrastructure, s); err != nil { return err } @@ -859,6 +860,7 @@ func autoConvert_v1beta1_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(in *v1b // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type return nil } @@ -884,6 +886,7 @@ func autoConvert_v1beta1_ControlPlaneTopology_To_v1alpha4_ControlPlaneTopology(i // WARNING: in.NodeDrainTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type + // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.Variables requires manual conversion: does not exist in peer-type return nil } @@ -1041,6 +1044,7 @@ func autoConvert_v1beta1_MachineDeploymentClass_To_v1alpha4_MachineDeploymentCla // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type + // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.Strategy requires manual conversion: does not exist in peer-type return nil } @@ -1254,6 +1258,7 @@ 
func autoConvert_v1beta1_MachineDeploymentTopology_To_v1alpha4_MachineDeployment // WARNING: in.NodeVolumeDetachTimeout requires manual conversion: does not exist in peer-type // WARNING: in.NodeDeletionTimeout requires manual conversion: does not exist in peer-type // WARNING: in.MinReadySeconds requires manual conversion: does not exist in peer-type + // WARNING: in.ReadinessGates requires manual conversion: does not exist in peer-type // WARNING: in.Strategy requires manual conversion: does not exist in peer-type // WARNING: in.Variables requires manual conversion: does not exist in peer-type return nil diff --git a/internal/contract/controlplane.go b/internal/contract/controlplane.go index 743925d2020b..6c725b54cb6a 100644 --- a/internal/contract/controlplane.go +++ b/internal/contract/controlplane.go @@ -17,6 +17,7 @@ limitations under the License. package contract import ( + "encoding/json" "sync" "github.com/blang/semver/v4" @@ -24,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/version" ) @@ -351,3 +353,60 @@ func (c *ControlPlaneMachineTemplate) NodeDeletionTimeout() *Duration { path: Path{"spec", "machineTemplate", "nodeDeletionTimeout"}, } } + +// ReadinessGates provides access to control plane's ReadinessGates. +func (c *ControlPlaneMachineTemplate) ReadinessGates() *ReadinessGates { + return &ReadinessGates{} +} + +// ReadinessGates provides a helper struct for working with ReadinessGates. +type ReadinessGates struct{} + +// Path returns the path of the ReadinessGates. +func (m *ReadinessGates) Path() Path { + return Path{"spec", "machineTemplate", "readinessGates"} +} + +// Get gets the ReadinessGates object. +func (m *ReadinessGates) Get(obj *unstructured.Unstructured) ([]clusterv1.MachineReadinessGate, error) { + unstructuredValue, ok, err := unstructured.NestedSlice(obj.UnstructuredContent(), m.Path()...) 
+ if err != nil { + return nil, errors.Wrapf(err, "failed to retrieve control plane %s", "."+m.Path().String()) + } + if !ok { + return nil, errors.Wrapf(ErrFieldNotFound, "path %s", "."+m.Path().String()) + } + + var readinessGates []clusterv1.MachineReadinessGate + jsonValue, err := json.Marshal(unstructuredValue) + if err != nil { + return nil, errors.Wrapf(err, "failed to Marshal control plane %s", "."+m.Path().String()) + } + if err := json.Unmarshal(jsonValue, &readinessGates); err != nil { + return nil, errors.Wrapf(err, "failed to Unmarshal control plane %s", "."+m.Path().String()) + } + + return readinessGates, nil +} + +// Set sets the ReadinessGates value. +// Note: in case the value is nil, the system assumes that the control plane does not implement the optional list of readiness gates. +func (m *ReadinessGates) Set(obj *unstructured.Unstructured, readinessGates []clusterv1.MachineReadinessGate) error { + unstructured.RemoveNestedField(obj.UnstructuredContent(), m.Path()...) + if readinessGates == nil { + return nil + } + + jsonValue, err := json.Marshal(readinessGates) + if err != nil { + return errors.Wrapf(err, "failed to Marshal control plane %s", "."+m.Path().String()) + } + var unstructuredValue []interface{} + if err := json.Unmarshal(jsonValue, &unstructuredValue); err != nil { + return errors.Wrapf(err, "failed to Unmarshal control plane %s", "."+m.Path().String()) + } + if err := unstructured.SetNestedSlice(obj.UnstructuredContent(), unstructuredValue, m.Path()...); err != nil { + return errors.Wrapf(err, "failed to set control plane %s", "."+m.Path().String()) + } + return nil +} diff --git a/internal/contract/controlplane_test.go b/internal/contract/controlplane_test.go index 20204530a3fe..acc27c7dde7d 100644 --- a/internal/contract/controlplane_test.go +++ b/internal/contract/controlplane_test.go @@ -356,6 +356,50 @@ func TestControlPlane(t *testing.T) { g.Expect(found).To(BeTrue()) g.Expect(durationString).To(Equal(expectedDurationString))
}) + t.Run("Manages spec.machineTemplate.readinessGates", func(t *testing.T) { + g := NewWithT(t) + + readinessGates := []clusterv1.MachineReadinessGate{ + {ConditionType: "foo"}, + {ConditionType: "bar"}, + } + + g.Expect(ControlPlane().MachineTemplate().ReadinessGates().Path()).To(Equal(Path{"spec", "machineTemplate", "readinessGates"})) + + err := ControlPlane().MachineTemplate().ReadinessGates().Set(obj, readinessGates) + g.Expect(err).ToNot(HaveOccurred()) + + got, err := ControlPlane().MachineTemplate().ReadinessGates().Get(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got).To(BeComparableTo(readinessGates)) + + // Nil readinessGates are not set. + obj2 := &unstructured.Unstructured{Object: map[string]interface{}{}} + readinessGates = nil + + err = ControlPlane().MachineTemplate().ReadinessGates().Set(obj2, readinessGates) + g.Expect(err).ToNot(HaveOccurred()) + + _, ok, err := unstructured.NestedSlice(obj2.UnstructuredContent(), ControlPlane().MachineTemplate().ReadinessGates().Path()...) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeFalse()) + + _, err = ControlPlane().MachineTemplate().ReadinessGates().Get(obj2) + g.Expect(err).To(HaveOccurred()) + + // Empty readinessGates are set. 
+ obj3 := &unstructured.Unstructured{Object: map[string]interface{}{}} + readinessGates = []clusterv1.MachineReadinessGate{} + + err = ControlPlane().MachineTemplate().ReadinessGates().Set(obj3, readinessGates) + g.Expect(err).ToNot(HaveOccurred()) + + got, err = ControlPlane().MachineTemplate().ReadinessGates().Get(obj3) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).ToNot(BeNil()) + g.Expect(got).To(BeComparableTo(readinessGates)) + }) } func TestControlPlaneIsUpgrading(t *testing.T) { diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 7a5d74e04d7a..dab45fc1bb9d 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -166,6 +166,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct s := &scope{ cluster: cluster, } + if cluster.Spec.Topology != nil { + s.clusterClass = &clusterv1.ClusterClass{} + if err := r.Client.Get(ctx, cluster.GetClassKey(), s.clusterClass); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to get ClusterClass %s", cluster.GetClassKey()) + } + } // Initialize the patch helper. patchHelper, err := patch.NewHelper(cluster, r.Client) @@ -316,6 +322,10 @@ type scope struct { // It is set at the beginning of the reconcile function. cluster *clusterv1.Cluster + // clusterClass is the ClusterClass referenced by the object being reconciled. + // It is set at the beginning of the reconcile function. + clusterClass *clusterv1.ClusterClass + // infraCluster is the Infrastructure Cluster object that is referenced by the // Cluster. It is set after reconcileInfrastructure is called. 
infraCluster *unstructured.Unstructured diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index 28d04b279b46..d9aa55a4a3b0 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -74,7 +74,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) { setScalingDownCondition(ctx, s.cluster, s.controlPlane, expv1.MachinePoolList{}, s.descendants.machineDeployments, s.descendants.machineSets, s.controlPlaneIsNotFound, s.getDescendantsSucceeded) setRemediatingCondition(ctx, s.cluster, machinesToBeRemediated, unhealthyMachines, s.getDescendantsSucceeded) setDeletingCondition(ctx, s.cluster, s.deletingReason, s.deletingMessage) - setAvailableCondition(ctx, s.cluster) + setAvailableCondition(ctx, s.cluster, s.clusterClass) } func setControlPlaneReplicas(_ context.Context, cluster *clusterv1.Cluster, controlPlane *unstructured.Unstructured, controlPlaneMachines collections.Machines, controlPlaneIsNotFound bool, getDescendantsSucceeded bool) { @@ -1053,7 +1053,7 @@ func (c clusterConditionCustomMergeStrategy) Merge(conditions []v1beta2condition ).Merge(conditions, conditionTypes) } -func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster) { +func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) { log := ctrl.LoggerFrom(ctx) forConditionTypes := v1beta2conditions.ForConditionTypes{ @@ -1064,7 +1064,11 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster) { clusterv1.ClusterWorkersAvailableV1Beta2Condition, clusterv1.ClusterTopologyReconciledV1Beta2Condition, } - for _, g := range cluster.Spec.AvailabilityGates { + availabilityGates := cluster.Spec.AvailabilityGates + if availabilityGates == nil && clusterClass != nil { + availabilityGates = clusterClass.Spec.AvailabilityGates + } + for _, g := range 
availabilityGates { forConditionTypes = append(forConditionTypes, g.ConditionType) } diff --git a/internal/controllers/cluster/cluster_controller_status_test.go b/internal/controllers/cluster/cluster_controller_status_test.go index 7d13da927c6b..de88e2b771fe 100644 --- a/internal/controllers/cluster/cluster_controller_status_test.go +++ b/internal/controllers/cluster/cluster_controller_status_test.go @@ -2178,6 +2178,7 @@ func TestSetAvailableCondition(t *testing.T) { testCases := []struct { name string cluster *clusterv1.Cluster + clusterClass *clusterv1.ClusterClass expectCondition metav1.Condition }{ { @@ -2367,7 +2368,7 @@ func TestSetAvailableCondition(t *testing.T) { }, }, { - name: "Takes into account Availability gates when defined", + name: "Takes into account Availability gates when defined on the cluster", cluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "machine-test", @@ -2418,6 +2419,15 @@ func TestSetAvailableCondition(t *testing.T) { }, }, }, + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + AvailabilityGates: []clusterv1.ClusterAvailabilityGate{ + { + ConditionType: "MyClusterClassAvailabilityGate", + }, + }, + }, + }, expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, @@ -2425,6 +2435,67 @@ func TestSetAvailableCondition(t *testing.T) { Message: "* MyAvailabilityGate: Some message", }, }, + { + name: "Takes into account Availability gates when defined on the cluster class", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: metav1.NamespaceDefault, + }, + Status: clusterv1.ClusterStatus{ + V1Beta2: &clusterv1.ClusterV1Beta2Status{ + Conditions: []metav1.Condition{ + { + Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: "Foo", + }, + { + Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: 
"Foo", + }, + { + Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: "Foo", + }, + { + Type: clusterv1.ClusterRemoteConnectionProbeV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: "Foo", + }, + { + Type: clusterv1.ClusterDeletingV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: "Foo", + }, + { + Type: "MyClusterClassAvailabilityGate", + Status: metav1.ConditionFalse, + Reason: "SomeReason", + Message: "Some message", + }, + }, + }, + }, + }, + clusterClass: &clusterv1.ClusterClass{ + Spec: clusterv1.ClusterClassSpec{ + AvailabilityGates: []clusterv1.ClusterAvailabilityGate{ + { + ConditionType: "MyClusterClassAvailabilityGate", + }, + }, + }, + }, + expectCondition: metav1.Condition{ + Type: clusterv1.ClusterAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, + Message: "* MyClusterClassAvailabilityGate: Some message", + }, + }, { name: "Tolerates InfraCluster and ControlPlane do not exists while the cluster is deleting", cluster: &clusterv1.Cluster{ @@ -2705,7 +2776,7 @@ func TestSetAvailableCondition(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - setAvailableCondition(ctx, tc.cluster) + setAvailableCondition(ctx, tc.cluster, tc.clusterClass) condition := v1beta2conditions.Get(tc.cluster, clusterv1.ClusterAvailableV1Beta2Condition) g.Expect(condition).ToNot(BeNil()) diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 10633db0e027..ed78569e9aba 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -527,6 +527,7 @@ func updateDesiredState(ctx context.Context, req *runtimehooksv1.GeneratePatches } if err := patchObject(ctx, desired.ControlPlane.Object, controlPlaneTemplate, PreserveFields{ contract.ControlPlane().MachineTemplate().Metadata().Path(), + 
contract.ControlPlane().MachineTemplate().ReadinessGates().Path(), contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path(), diff --git a/util/test/builder/builders.go b/util/test/builder/builders.go index 762b418fe835..91f843040787 100644 --- a/util/test/builder/builders.go +++ b/util/test/builder/builders.go @@ -339,6 +339,7 @@ type ClusterClassBuilder struct { name string infrastructureClusterTemplate *unstructured.Unstructured controlPlaneMetadata *clusterv1.ObjectMeta + controlPlaneReadinessGates []clusterv1.MachineReadinessGate controlPlaneTemplate *unstructured.Unstructured controlPlaneInfrastructureMachineTemplate *unstructured.Unstructured controlPlaneMHC *clusterv1.MachineHealthCheckClass @@ -383,6 +384,12 @@ func (c *ClusterClassBuilder) WithControlPlaneMetadata(labels, annotations map[s return c } +// WithControlPlaneReadinessGates adds the given readinessGates for use with the ControlPlane to the ClusterClassBuilder. +func (c *ClusterClassBuilder) WithControlPlaneReadinessGates(readinessGates []clusterv1.MachineReadinessGate) *ClusterClassBuilder { + c.controlPlaneReadinessGates = readinessGates + return c +} + // WithControlPlaneInfrastructureMachineTemplate adds the ControlPlane's InfrastructureMachineTemplate to the ClusterClassBuilder. 
func (c *ClusterClassBuilder) WithControlPlaneInfrastructureMachineTemplate(t *unstructured.Unstructured) *ClusterClassBuilder { c.controlPlaneInfrastructureMachineTemplate = t @@ -489,6 +496,9 @@ func (c *ClusterClassBuilder) Build() *clusterv1.ClusterClass { if c.controlPlaneMetadata != nil { obj.Spec.ControlPlane.Metadata = *c.controlPlaneMetadata } + if c.controlPlaneReadinessGates != nil { + obj.Spec.ControlPlane.ReadinessGates = c.controlPlaneReadinessGates + } if c.controlPlaneTemplate != nil { obj.Spec.ControlPlane.LocalObjectTemplate = clusterv1.LocalObjectTemplate{ Ref: objToRef(c.controlPlaneTemplate), @@ -528,6 +538,7 @@ type MachineDeploymentClassBuilder struct { labels map[string]string annotations map[string]string machineHealthCheckClass *clusterv1.MachineHealthCheckClass + readinessGates []clusterv1.MachineReadinessGate failureDomain *string nodeDrainTimeout *metav1.Duration nodeVolumeDetachTimeout *metav1.Duration @@ -574,6 +585,12 @@ func (m *MachineDeploymentClassBuilder) WithMachineHealthCheckClass(mhc *cluster return m } +// WithReadinessGates sets the readinessGates for the MachineDeploymentClassBuilder. +func (m *MachineDeploymentClassBuilder) WithReadinessGates(readinessGates []clusterv1.MachineReadinessGate) *MachineDeploymentClassBuilder { + m.readinessGates = readinessGates + return m +} + // WithFailureDomain sets the FailureDomain for the MachineDeploymentClassBuilder. 
func (m *MachineDeploymentClassBuilder) WithFailureDomain(f *string) *MachineDeploymentClassBuilder { m.failureDomain = f @@ -636,6 +653,9 @@ func (m *MachineDeploymentClassBuilder) Build() *clusterv1.MachineDeploymentClas if m.machineHealthCheckClass != nil { obj.MachineHealthCheck = m.machineHealthCheckClass } + if m.readinessGates != nil { + obj.ReadinessGates = m.readinessGates + } if m.failureDomain != nil { obj.FailureDomain = m.failureDomain } diff --git a/util/test/builder/zz_generated.deepcopy.go b/util/test/builder/zz_generated.deepcopy.go index 696a9b99ee24..1a7ed63726f7 100644 --- a/util/test/builder/zz_generated.deepcopy.go +++ b/util/test/builder/zz_generated.deepcopy.go @@ -125,6 +125,11 @@ func (in *ClusterClassBuilder) DeepCopyInto(out *ClusterClassBuilder) { *out = new(v1beta1.ObjectMeta) (*in).DeepCopyInto(*out) } + if in.controlPlaneReadinessGates != nil { + in, out := &in.controlPlaneReadinessGates, &out.controlPlaneReadinessGates + *out = make([]v1beta1.MachineReadinessGate, len(*in)) + copy(*out, *in) + } if in.controlPlaneTemplate != nil { in, out := &in.controlPlaneTemplate, &out.controlPlaneTemplate *out = (*in).DeepCopy() @@ -512,6 +517,11 @@ func (in *MachineDeploymentClassBuilder) DeepCopyInto(out *MachineDeploymentClas *out = new(v1beta1.MachineHealthCheckClass) (*in).DeepCopyInto(*out) } + if in.readinessGates != nil { + in, out := &in.readinessGates, &out.readinessGates + *out = make([]v1beta1.MachineReadinessGate, len(*in)) + copy(*out, *in) + } if in.failureDomain != nil { in, out := &in.failureDomain, &out.failureDomain *out = new(string)