
Commit 83036e1

Author pweikai committed:
add nodedraintimeout to v1beta1 api type
1 parent bf89e4b commit 83036e1

File tree

8 files changed (+77, -15 lines)


api/v1alpha4/conversion.go

+11
@@ -42,13 +42,19 @@ func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error {
 			dst.Spec.Topology = &clusterv1.Topology{}
 		}
 		dst.Spec.Topology.Variables = restored.Spec.Topology.Variables
+
+		if restored.Spec.Topology.ControlPlane.NodeDrainTimeout != nil {
+			dst.Spec.Topology.ControlPlane.NodeDrainTimeout = restored.Spec.Topology.ControlPlane.NodeDrainTimeout
+		}
+
 		if restored.Spec.Topology.Workers != nil {
 			if dst.Spec.Topology.Workers == nil {
 				dst.Spec.Topology.Workers = &clusterv1.WorkersTopology{}
 			}
 			for i := range restored.Spec.Topology.Workers.MachineDeployments {
 				dst.Spec.Topology.Workers.MachineDeployments[i].FailureDomain = restored.Spec.Topology.Workers.MachineDeployments[i].FailureDomain
 				dst.Spec.Topology.Workers.MachineDeployments[i].Variables = restored.Spec.Topology.Workers.MachineDeployments[i].Variables
+				dst.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeout = restored.Spec.Topology.Workers.MachineDeployments[i].NodeDrainTimeout
 			}
 		}
 	}
@@ -317,3 +323,8 @@ func Convert_v1beta1_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(in *cluster
 	// controlPlaneClass.machineHealthCheck has been added with v1beta1.
 	return autoConvert_v1beta1_ControlPlaneClass_To_v1alpha4_ControlPlaneClass(in, out, s)
 }
+
+func Convert_v1beta1_ControlPlaneTopology_To_v1alpha4_ControlPlaneTopology(in *clusterv1.ControlPlaneTopology, out *ControlPlaneTopology, s apiconversion.Scope) error {
+	// controlPlaneTopology.nodeDrainTimeout has been added with v1beta1.
+	return autoConvert_v1beta1_ControlPlaneTopology_To_v1alpha4_ControlPlaneTopology(in, out, s)
+}
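
The restore block above matters because nodeDrainTimeout has no v1alpha4 counterpart: on a v1beta1 -> v1alpha4 -> v1beta1 round trip the value survives only through the conversion-data annotation that the spoke's ConvertFrom writes, and the new code copies it back out of the restored object. A minimal, hypothetical round-trip sketch (assuming the usual annotation-based restore mechanism these conversion functions rely on; not code from this commit):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1alpha4 "sigs.k8s.io/cluster-api/api/v1alpha4"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	timeout := metav1.Duration{Duration: 5 * time.Minute}
	hub := &clusterv1.Cluster{
		Spec: clusterv1.ClusterSpec{
			Topology: &clusterv1.Topology{
				ControlPlane: clusterv1.ControlPlaneTopology{NodeDrainTimeout: &timeout},
			},
		},
	}

	// Down-convert: v1alpha4 has no nodeDrainTimeout field, so the value is kept
	// only inside the conversion-data annotation written by ConvertFrom.
	spoke := &clusterv1alpha4.Cluster{}
	if err := spoke.ConvertFrom(hub); err != nil {
		panic(err)
	}

	// Up-convert: the block added in this commit restores the field from the
	// unmarshalled v1beta1 data, so the round trip is lossless.
	restored := &clusterv1.Cluster{}
	if err := spoke.ConvertTo(restored); err != nil {
		panic(err)
	}
	fmt.Println(restored.Spec.Topology.ControlPlane.NodeDrainTimeout.Duration) // expected: 5m0s
}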

api/v1alpha4/zz_generated.conversion.go

+7 -10
Some generated files are not rendered by default.

api/v1beta1/cluster_types.go

+12
@@ -115,6 +115,12 @@ type ControlPlaneTopology struct {
 	// When specified against a control plane provider that lacks support for this field, this value will be ignored.
 	// +optional
 	Replicas *int32 `json:"replicas,omitempty"`
+
+	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+	// The default value is 0, meaning that the node can be drained without any time limitations.
+	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+	// +optional
+	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
 }

 // WorkersTopology represents the different sets of worker nodes in the cluster.
@@ -155,6 +161,12 @@ type MachineDeploymentTopology struct {
 	// +optional
 	Replicas *int32 `json:"replicas,omitempty"`

+	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.
+	// The default value is 0, meaning that the node can be drained without any time limitations.
+	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+	// +optional
+	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
+
 	// Variables can be used to customize the MachineDeployment through patches.
 	// +optional
 	Variables *MachineDeploymentVariables `json:"variables,omitempty"`
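
Taken together, the two new fields let a topology-managed Cluster declare a drain timeout for the control plane and for each MachineDeployment class independently. A hedged sketch of how they would be populated on the v1beta1 Go types (the class and deployment names are illustrative, not taken from this commit beyond the test fixtures):

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// buildTopology shows where the new nodeDrainTimeout fields sit on the v1beta1 types.
func buildTopology() *clusterv1.Topology {
	drainTimeout := metav1.Duration{Duration: 90 * time.Second}
	return &clusterv1.Topology{
		Class:   "example-clusterclass", // illustrative name
		Version: "v1.21.2",
		ControlPlane: clusterv1.ControlPlaneTopology{
			// Propagated to the control plane object as spec.machineTemplate.nodeDrainTimeout.
			NodeDrainTimeout: &drainTimeout,
		},
		Workers: &clusterv1.WorkersTopology{
			MachineDeployments: []clusterv1.MachineDeploymentTopology{{
				Class: "linux-worker",         // illustrative worker class
				Name:  "big-pool-of-machines", // illustrative name
				// Propagated to the generated MachineDeployment's Machine template.
				NodeDrainTimeout: &drainTimeout,
			}},
		},
	}
}

func main() { _ = buildTopology() }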

api/v1beta1/zz_generated.deepcopy.go

+10
Some generated files are not rendered by default.

config/crd/bases/cluster.x-k8s.io_clusters.yaml

+15
@@ -857,6 +857,13 @@ spec:
                        More info: http://kubernetes.io/docs/user-guide/labels'
                      type: object
                  type: object
+              nodeDrainTimeout:
+                description: 'NodeDrainTimeout is the total amount of time
+                  that the controller will spend on draining a node. The default
+                  value is 0, meaning that the node can be drained without
+                  any time limitations. NOTE: NodeDrainTimeout is different
+                  from `kubectl drain --timeout`'
+                type: string
              replicas:
                description: Replicas is the number of control plane nodes.
                  If the value is nil, the ControlPlane object is created
@@ -959,6 +966,14 @@ spec:
                          is greater than the allowed maximum length, the values
                          are hashed together.
                        type: string
+                      nodeDrainTimeout:
+                        description: 'NodeDrainTimeout is the total amount of
+                          time that the controller will spend on draining a
+                          node. The default value is 0, meaning that the node
+                          can be drained without any time limitations. NOTE:
+                          NodeDrainTimeout is different from `kubectl drain
+                          --timeout`'
+                        type: string
                      replicas:
                        description: Replicas is the number of worker nodes
                          belonging to this set. If the value is nil, the MachineDeployment
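
In the generated CRD the new property is declared as type: string rather than a number, because metav1.Duration serializes to a Go-style duration string ("10s", "5m", and so on). A quick standalone check, not part of the commit:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	d := metav1.Duration{Duration: 10 * time.Second}
	b, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	// Prints "10s" (a quoted duration string), which is why the CRD schema uses type: string.
	fmt.Println(string(b))
}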

internal/controllers/topology/cluster/desired_state.go

+8
@@ -212,6 +212,13 @@ func computeControlPlane(_ context.Context, s *scope.Scope, infrastructureMachin
 		}
 	}

+	// If it is required to manage the NodeDrainTimeout for the control plane, set the corresponding field.
+	if s.Blueprint.Topology.ControlPlane.NodeDrainTimeout != nil {
+		if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(controlPlane, *s.Blueprint.Topology.ControlPlane.NodeDrainTimeout); err != nil {
+			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDrainTimeout in the ControlPlane object")
+		}
+	}
+
 	// Sets the desired Kubernetes version for the control plane.
 	version, err := computeControlPlaneVersion(s)
 	if err != nil {
@@ -446,6 +453,7 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, desiredControlP
 				Bootstrap:         clusterv1.Bootstrap{ConfigRef: contract.ObjToRef(desiredMachineDeployment.BootstrapTemplate)},
 				InfrastructureRef: *contract.ObjToRef(desiredMachineDeployment.InfrastructureMachineTemplate),
 				FailureDomain:     machineDeploymentTopology.FailureDomain,
+				NodeDrainTimeout:  machineDeploymentTopology.NodeDrainTimeout,
 			},
 		},
 	},
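
The provider's ControlPlane object is handled as unstructured, so the new value cannot be assigned to a typed field; instead it is written through the contract's path accessor at spec.machineTemplate.nodeDrainTimeout, the path named in the error message above. A simplified, hypothetical stand-in for what that setter has to do, kept consistent with the serialized form the new test in desired_state_test.go asserts below (the real contract implementation may differ):

package sketch

import (
	"encoding/json"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// setNodeDrainTimeout is a hypothetical stand-in for
// contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(...).
func setNodeDrainTimeout(controlPlane *unstructured.Unstructured, d metav1.Duration) error {
	// metav1.Duration marshals to a quoted duration string such as "10s".
	raw, err := json.Marshal(d)
	if err != nil {
		return err
	}
	return unstructured.SetNestedField(controlPlane.Object,
		string(raw),
		"spec", "machineTemplate", "nodeDrainTimeout")
}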

internal/controllers/topology/cluster/desired_state_test.go

+13 -5
@@ -17,6 +17,7 @@ limitations under the License.
 package cluster

 import (
+	"fmt"
 	"strings"
 	"testing"
 	"time"
@@ -234,6 +235,8 @@ func TestComputeControlPlane(t *testing.T) {
 	// current cluster objects
 	version := "v1.21.2"
 	replicas := int32(3)
+	duration := 10 * time.Second
+	nodeDrainTimeout := metav1.Duration{Duration: duration}
 	cluster := &clusterv1.Cluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "cluster1",
@@ -247,7 +250,8 @@ func TestComputeControlPlane(t *testing.T) {
 					Labels:      map[string]string{"l2": ""},
 					Annotations: map[string]string{"a2": ""},
 				},
-				Replicas: &replicas,
+				Replicas:         &replicas,
+				NodeDrainTimeout: &nodeDrainTimeout,
 			},
 		},
 	},
@@ -282,6 +286,7 @@ func TestComputeControlPlane(t *testing.T) {

 		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
 		assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...)
+		assertNestedField(g, obj, fmt.Sprintf("%q", duration), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...)
 		assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)

 		// Ensure no ownership is added to generated ControlPlane.
@@ -750,14 +755,16 @@ func TestComputeMachineDeployment(t *testing.T) {

 	replicas := int32(5)
 	failureDomain := "always-up-region"
+	nodeDrainTimeout := metav1.Duration{Duration: 10 * time.Second}
 	mdTopology := clusterv1.MachineDeploymentTopology{
 		Metadata: clusterv1.ObjectMeta{
 			Labels: map[string]string{"foo": "baz"},
 		},
-		Class:         "linux-worker",
-		Name:          "big-pool-of-machines",
-		Replicas:      &replicas,
-		FailureDomain: &failureDomain,
+		Class:            "linux-worker",
+		Name:             "big-pool-of-machines",
+		Replicas:         &replicas,
+		FailureDomain:    &failureDomain,
+		NodeDrainTimeout: &nodeDrainTimeout,
 	}

 	t.Run("Generates the machine deployment and the referenced templates", func(t *testing.T) {
@@ -785,6 +792,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 		actualMd := actual.Object
 		g.Expect(*actualMd.Spec.Replicas).To(Equal(replicas))
 		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(failureDomain))
+		g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(nodeDrainTimeout))
 		g.Expect(actualMd.Spec.ClusterName).To(Equal("cluster1"))
 		g.Expect(actualMd.Name).To(ContainSubstring("cluster1"))
 		g.Expect(actualMd.Name).To(ContainSubstring("big-pool-of-machines"))
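
The new control plane assertion compares against fmt.Sprintf("%q", duration) rather than the metav1.Duration value itself, because what lands in the unstructured ControlPlane object is the quoted, serialized duration string. What that format verb yields, as a standalone illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	duration := 10 * time.Second
	// time.Duration implements fmt.Stringer, so %q quotes its String() form.
	fmt.Printf("%q\n", duration) // prints "10s", including the quote characters
}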

internal/controllers/topology/cluster/patches/engine.go

+1
@@ -326,6 +326,7 @@ func updateDesiredState(ctx context.Context, req *api.GenerateRequest, blueprint
 	if err := patchObject(ctx, desired.ControlPlane.Object, controlPlaneTemplate, PreserveFields{
 		contract.ControlPlane().MachineTemplate().Metadata().Path(),
 		contract.ControlPlane().MachineTemplate().InfrastructureRef().Path(),
+		contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path(),
 		contract.ControlPlane().Replicas().Path(),
 		contract.ControlPlane().Version().Path(),
 	}); err != nil {
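
Registering the path in PreserveFields keeps the nodeDrainTimeout computed in desired_state.go from being overwritten when the desired ControlPlane is patched from the ControlPlaneTemplate, alongside the metadata, infrastructureRef, replicas, and version paths already listed. Conceptually, preserving a field means carrying its pre-patch value forward; a rough sketch of that idea (not the project's patchObject implementation, and the helper name is hypothetical):

package sketch

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// preserveField copies the value at path from original onto patched, so that
// template-derived patches cannot clobber topology-managed fields such as
// spec.machineTemplate.nodeDrainTimeout.
func preserveField(patched, original *unstructured.Unstructured, path ...string) error {
	value, found, err := unstructured.NestedFieldCopy(original.Object, path...)
	if err != nil {
		return err
	}
	if !found {
		// Nothing was set on the original object; make sure the patch did not add it either.
		unstructured.RemoveNestedField(patched.Object, path...)
		return nil
	}
	return unstructured.SetNestedField(patched.Object, value, path...)
}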
