Commit 451c720 ("Feedback #3")
Parent: f4409bf

5 files changed, +23 -49 lines

api/v1alpha3/machinehealthcheck_webhook.go (+1 -1)

@@ -102,7 +102,7 @@ func (m *MachineHealthCheck) validate(old *MachineHealthCheck) error {
 	if m.Spec.NodeStartupTimeout != nil && m.Spec.NodeStartupTimeout.Seconds() < 30 {
 		allErrs = append(
 			allErrs,
-			field.Invalid(field.NewPath("spec", "nodeStartupTimeout"), m.Spec.NodeStartupTimeout, "must be greater at least 30s"),
+			field.Invalid(field.NewPath("spec", "nodeStartupTimeout"), m.Spec.NodeStartupTimeout, "must be at least 30s"),
 		)
 	}
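
For context, the check this hunk touches rejects NodeStartupTimeout values below 30 seconds; only the error message changes. A minimal standalone sketch of the same rule (the helper name is hypothetical; in the file the check lives inline in validate):

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateNodeStartupTimeout is a hypothetical standalone version of the
// inline check above. NodeStartupTimeout is a *metav1.Duration, which
// embeds time.Duration, so Seconds() is promoted.
func validateNodeStartupTimeout(timeout *metav1.Duration) *field.Error {
	if timeout != nil && timeout.Seconds() < 30 {
		return field.Invalid(field.NewPath("spec", "nodeStartupTimeout"), timeout, "must be at least 30s")
	}
	return nil
}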

controllers/machinehealthcheck_controller.go (+1 -20)

@@ -303,26 +303,7 @@ func (r *MachineHealthCheckReconciler) nodeToMachineHealthCheck(o handler.MapObj
 		return nil
 	}
 
-	mhcList := &clusterv1.MachineHealthCheckList{}
-	if err := r.Client.List(
-		context.Background(),
-		mhcList,
-		&client.ListOptions{Namespace: machine.Namespace},
-		client.MatchingFields{mhcClusterNameIndex: machine.Spec.ClusterName},
-	); err != nil {
-		r.Log.Error(err, "Unable to list MachineHealthChecks", "node", node.Name, "machine", machine.Name, "namespace", machine.Namespace)
-		return nil
-	}
-
-	var requests []reconcile.Request
-	for k := range mhcList.Items {
-		mhc := &mhcList.Items[k]
-		if hasMatchingLabels(mhc.Spec.Selector, machine.Labels) {
-			key := util.ObjectKey(mhc)
-			requests = append(requests, reconcile.Request{NamespacedName: key})
-		}
-	}
-	return requests
+	return r.machineToMachineHealthCheck(handler.MapObject{Object: machine})
 }
 
 func (r *MachineHealthCheckReconciler) getMachineFromNode(nodeName string) (*clusterv1.Machine, error) {
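
The deleted list-and-filter logic is not lost: once the Node is resolved to its Machine, the handler delegates, so the MachineHealthCheck fan-out lives in one place. A sketch of the retained machineToMachineHealthCheck, assuming it carries essentially the body removed above (the log fields would differ slightly, since no Node is in scope there):

// Sketch only: assumed to mirror the logic deleted in this hunk.
func (r *MachineHealthCheckReconciler) machineToMachineHealthCheck(o handler.MapObject) []reconcile.Request {
	m, ok := o.Object.(*clusterv1.Machine)
	if !ok {
		return nil
	}

	// Find all MHCs in the Machine's namespace for the same cluster,
	// using the mhcClusterNameIndex field index.
	mhcList := &clusterv1.MachineHealthCheckList{}
	if err := r.Client.List(
		context.Background(),
		mhcList,
		&client.ListOptions{Namespace: m.Namespace},
		client.MatchingFields{mhcClusterNameIndex: m.Spec.ClusterName},
	); err != nil {
		r.Log.Error(err, "Unable to list MachineHealthChecks", "machine", m.Name, "namespace", m.Namespace)
		return nil
	}

	// Enqueue a reconcile for every MHC whose selector matches the Machine.
	var requests []reconcile.Request
	for k := range mhcList.Items {
		mhc := &mhcList.Items[k]
		if hasMatchingLabels(mhc.Spec.Selector, m.Labels) {
			requests = append(requests, reconcile.Request{NamespacedName: util.ObjectKey(mhc)})
		}
	}
	return requests
}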

controllers/machinehealthcheck_controller_test.go (+10 -19)

@@ -33,6 +33,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/controllers/external"
+	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
@@ -114,7 +115,7 @@ var _ = Describe("MachineHealthCheck Reconciler", func() {
 
 		Eventually(func() map[string]string {
 			mhc := &clusterv1.MachineHealthCheck{}
-			err := k8sClient.Get(ctx, client.ObjectKey{Namespace: mhcToCreate.GetNamespace(), Name: mhcToCreate.GetName()}, mhc)
+			err := k8sClient.Get(ctx, util.ObjectKey(mhcToCreate), mhc)
			if err != nil {
				return nil
			}
@@ -165,7 +166,7 @@ var _ = Describe("MachineHealthCheck Reconciler", func() {
 
		Eventually(func() []metav1.OwnerReference {
			mhc := &clusterv1.MachineHealthCheck{}
-			err := k8sClient.Get(ctx, client.ObjectKey{Namespace: mhcToCreate.GetNamespace(), Name: mhcToCreate.GetName()}, mhc)
+			err := k8sClient.Get(ctx, util.ObjectKey(mhcToCreate), mhc)
			if err != nil {
				return []metav1.OwnerReference{}
			}
@@ -207,7 +208,7 @@ func cleanupTestMachineHealthChecks(ctx context.Context, c client.Client) error
 func ownerReferenceForCluster(ctx context.Context, c *clusterv1.Cluster) metav1.OwnerReference {
	// Fetch the cluster to populate the UID
	cc := &clusterv1.Cluster{}
-	Expect(k8sClient.Get(ctx, client.ObjectKey{Namespace: c.GetNamespace(), Name: c.GetName()}, cc)).To(Succeed())
+	Expect(k8sClient.Get(ctx, util.ObjectKey(c), cc)).To(Succeed())
 
	return metav1.OwnerReference{
		APIVersion: clusterv1.GroupVersion.String(),
@@ -341,11 +342,8 @@ func TestClusterToMachineHealthCheck(t *testing.T) {
			gs.Expect(r.Client.Delete(ctx, &o)).To(Succeed())
		}()
		// Check the cache is populated
-		key, err := client.ObjectKeyFromObject(&o)
-		gs.Expect(err).ToNot(HaveOccurred())
-
		getObj := func() error {
-			return r.Client.Get(ctx, key, &clusterv1.MachineHealthCheck{})
+			return r.Client.Get(ctx, util.ObjectKey(&o), &clusterv1.MachineHealthCheck{})
		}
		gs.Eventually(getObj, timeout).Should(Succeed())
	}
@@ -539,11 +537,8 @@ func TestMachineToMachineHealthCheck(t *testing.T) {
			gs.Expect(r.Client.Delete(ctx, &o)).To(Succeed())
		}()
		// Check the cache is populated
-		key, err := client.ObjectKeyFromObject(&o)
-		gs.Expect(err).ToNot(HaveOccurred())
-
		getObj := func() error {
-			return r.Client.Get(ctx, key, &clusterv1.MachineHealthCheck{})
+			return r.Client.Get(ctx, util.ObjectKey(&o), &clusterv1.MachineHealthCheck{})
		}
		gs.Eventually(getObj, timeout).Should(Succeed())
	}
@@ -675,7 +670,7 @@ func TestNodeToMachineHealthCheck(t *testing.T) {
			expected: []reconcile.Request{mhc1Req},
		},
		{
-			name: "when two NachineHealthChecks exist for the Node in the Machine's namespace",
+			name: "when two MachineHealthChecks exist for the Node in the Machine's namespace",
			mhcToCreate: []clusterv1.MachineHealthCheck{*mhc1, *mhc2},
			mToCreate: []clusterv1.Machine{*machine1},
			object: handler.MapObject{
@@ -706,9 +701,7 @@ func TestNodeToMachineHealthCheck(t *testing.T) {
			gs.Expect(r.Client.Delete(ctx, &o)).To(Succeed())
		}()
		// Check the cache is populated
-		key, err := client.ObjectKeyFromObject(&o)
-		gs.Expect(err).ToNot(HaveOccurred())
-
+		key := util.ObjectKey(&o)
		getObj := func() error {
			return r.Client.Get(ctx, key, &clusterv1.MachineHealthCheck{})
		}
@@ -720,14 +713,12 @@ func TestNodeToMachineHealthCheck(t *testing.T) {
		defer func() {
			gs.Expect(r.Client.Delete(ctx, &o)).To(Succeed())
		}()
-		// Ensure the status is set (required for matchin node to machine)
+		// Ensure the status is set (required for matching node to machine)
		o.Status = obj.Status
		gs.Expect(r.Client.Status().Update(ctx, &o)).To(Succeed())
 
		// Check the cache is up to date with the status update
-		key, err := client.ObjectKeyFromObject(&o)
-		gs.Expect(err).ToNot(HaveOccurred())
-
+		key := util.ObjectKey(&o)
		checkStatus := func() clusterv1.MachineStatus {
			m := &clusterv1.Machine{}
			err := r.Client.Get(ctx, key, m)
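
All of these hunks swap hand-built client.ObjectKey literals, and the error-returning client.ObjectKeyFromObject, for the project's util.ObjectKey helper. Assuming the helper is the obvious infallible wrapper over the metav1.Object accessors, it would read roughly:

// Sketch of sigs.k8s.io/cluster-api/util.ObjectKey, assumed to be a
// plain constructor with no error path.
func ObjectKey(object metav1.Object) client.ObjectKey {
	return client.ObjectKey{
		Namespace: object.GetNamespace(),
		Name:      object.GetName(),
	}
}

Because it cannot fail, each call site drops the err plumbing that client.ObjectKeyFromObject required, which is why three-line blocks above collapse to one.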

controllers/machinehealthcheck_targets.go (+6 -8)

@@ -63,7 +63,7 @@ func (t *healthCheckTarget) nodeName() string {
 }
 
 // Determine whether or not a given target needs remediation.
-// The node will be need rememdiation if any of the following are true:
+// The node will need remediation if any of the following are true:
 // - The Machine has failed for some reason
 // - The Machine did not get a node before `timeoutForMachineToHaveNode` elapses
 // - The Node has gone away
@@ -103,7 +103,6 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi
 
 	// check conditions
 	for _, c := range t.MHC.Spec.UnhealthyConditions {
-		now := time.Now()
 		nodeCondition := getNodeCondition(t.Node, c.Type)
 
 		// Skip when current node condition is different from the one reported
@@ -165,7 +164,7 @@ func (r *MachineHealthCheckReconciler) getTargetsFromMHC(clusterClient client.Cl
 func (r *MachineHealthCheckReconciler) getMachinesFromMHC(mhc *clusterv1.MachineHealthCheck) ([]clusterv1.Machine, error) {
 	selector, err := metav1.LabelSelectorAsSelector(&mhc.Spec.Selector)
 	if err != nil {
-		return nil, errors.New("failed to build selector")
+		return nil, errors.Wrap(err, "failed to build selector")
 	}
 
 	options := client.ListOptions{
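
Switching errors.New to errors.Wrap preserves the underlying cause instead of discarding it, assuming the errors import here is github.com/pkg/errors (consistent with Wrap being available). A minimal illustration:

// errors.New yields just:  "failed to build selector"
// errors.Wrap yields:      "failed to build selector: <original message>"
wrapped := errors.Wrap(err, "failed to build selector")
fmt.Println(wrapped)
fmt.Println(errors.Cause(wrapped) == err) // true: callers can still inspect the cause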
@@ -188,8 +187,7 @@ func (r *MachineHealthCheckReconciler) getNodeFromMachine(clusterClient client.C
 
 	node := &corev1.Node{}
 	nodeKey := types.NamespacedName{
-		Namespace: machine.Status.NodeRef.Namespace,
-		Name:      machine.Status.NodeRef.Name,
+		Name: machine.Status.NodeRef.Name,
 	}
 	err := clusterClient.Get(context.TODO(), nodeKey, node)
 	return node, err
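
The nodeKey change reflects that Node is a cluster-scoped resource, so its lookup key should carry only a Name (the NodeRef's Namespace is empty for cluster-scoped referents anyway). A quick contrast, with made-up object names:

// Cluster-scoped resource (Node): only Name is set.
nodeKey := types.NamespacedName{Name: "worker-node-0"}

// Namespaced resource (e.g. a Machine): both fields are required.
machineKey := types.NamespacedName{Namespace: "default", Name: "machine-0"}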
@@ -248,9 +246,9 @@ func minDuration(durations []time.Duration) time.Duration {
 		return time.Duration(0)
 	}
 
-	// durations should all be less than 1 Hour
-	minDuration := time.Hour
-	for _, nc := range durations {
+	minDuration := durations[0]
+	// Ignore first element as that is already minDuration
+	for _, nc := range durations[1:] {
 		if nc < minDuration {
 			minDuration = nc
 		}
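
This rewrite fixes a latent bug rather than just style: seeding the minimum with time.Hour silently capped the result at one hour whenever every input was larger, while seeding with durations[0] is correct for any non-empty input. For example:

durations := []time.Duration{2 * time.Hour, 3 * time.Hour}
fmt.Println(minDuration(durations))
// before this commit: 1h0m0s (wrong: capped by the time.Hour seed)
// after this commit:  2h0m0s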

controllers/machinehealthcheck_targets_test.go (+5 -1)

@@ -231,7 +231,7 @@ func TestHealthCheckTargets(t *testing.T) {
 		Node: testNodeUnknown400,
 	}
 
-	// Traget for when a node is healthy
+	// Target for when a node is healthy
 	testNodeHealthy := newTestNode("node1")
 	testNodeHealthy.UID = "12345"
 	nodeHealthy := healthCheckTarget{
@@ -355,6 +355,10 @@ func newTestMachine(name, namespace, clusterName, nodeName string, labels map[st
 
 func newTestNode(name string) *corev1.Node {
 	return &corev1.Node{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Node",
+		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
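
Populating TypeMeta makes the fixture self-identify its GroupVersionKind when handled as a generic runtime.Object, for instance when passed through handler.MapObject in the mapping tests; presumably that is what this hunk is for. A quick illustration:

node := newTestNode("node-1")
gvk := node.GetObjectKind().GroupVersionKind()
fmt.Println(gvk.Kind, gvk.GroupVersion()) // Node v1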
