/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/cluster-api/controllers/noderefutil"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

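// ErrNodeNotFound signals that a Node with a ProviderID matching the
// Machine's ProviderID could not be found on the workload cluster.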
var (
	ErrNodeNotFound = errors.New("cannot find node with matching ProviderID")
)

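// reconcileNode looks up the Node on the workload cluster that matches the
// Machine's ProviderID, sets Status.NodeRef once the Node is found, and keeps
// the MachineNodeHealthy condition in sync with the Node's conditions.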
func (r *MachineReconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (reconcile.Result, error) {
	logger := r.Log.WithValues("machine", machine.Name, "namespace", machine.Namespace)

	// Check that the Machine has a valid ProviderID.
	if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" {
		logger.Info("Cannot reconcile node, the machine doesn't have a valid ProviderID yet")
		conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "")
		return reconcile.Result{}, nil
	}

	providerID, err := noderefutil.NewProviderID(*machine.Spec.ProviderID)
	if err != nil {
		return reconcile.Result{}, err
	}

	remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
	if err != nil {
		return reconcile.Result{}, err
	}

	// Even if Status.NodeRef exists, continue with the checks below to make sure the Node is healthy.
	node, err := r.getNode(ctx, remoteClient, providerID)
	if err != nil {
		if err == ErrNodeNotFound {
			// If a NodeRef is already set in the status, failing to find that Node means it was deleted.
			// If Status.NodeRef has never been set, the Node may still be provisioning.
			if machine.Status.NodeRef != nil {
				conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "")
				return reconcile.Result{}, errors.Wrapf(err, "no matching Node for Machine %q in namespace %q", machine.Name, machine.Namespace)
			}
			conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityWarning, "")
			return reconcile.Result{Requeue: true}, nil
		}
		logger.Error(err, "Failed to retrieve Node by ProviderID")
		r.recorder.Event(machine, corev1.EventTypeWarning, "Failed to retrieve Node by ProviderID", err.Error())
		return reconcile.Result{}, err
	}

	// Set the Machine NodeRef.
	if machine.Status.NodeRef == nil {
		machine.Status.NodeRef = &corev1.ObjectReference{
			Kind:       node.Kind,
			APIVersion: node.APIVersion,
			Name:       node.Name,
			UID:        node.UID,
		}
		logger.Info("Set Machine's NodeRef", "noderef", machine.Status.NodeRef.Name)
		r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name)
	}

	// Do the remaining node health checks, then set the node health to true if all checks pass.
	status, message := summarizeNodeConditions(node)
	if status == corev1.ConditionFalse {
		conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, message)
		return reconcile.Result{}, nil
	}

	conditions.MarkTrue(machine, clusterv1.MachineNodeHealthyCondition)
	return reconcile.Result{}, nil
}

// summarizeNodeConditions summarizes a Node's conditions and returns a summary status together with the concatenated messages of any failed conditions:
// if there is at least one semantically-negative condition, the summarized status is False;
// if there are no semantically-negative conditions and at least one semantically-positive condition, the summarized status is True;
// if all checked conditions are Unknown, the summarized status is Unknown.
// (Semantically-positive conditions are: NodeMemoryPressure/NodeDiskPressure/NodePIDPressure == False, or Ready == True.)
func summarizeNodeConditions(node *corev1.Node) (corev1.ConditionStatus, string) {
	// The four condition types checked below: NodeReady, NodeMemoryPressure, NodeDiskPressure and NodePIDPressure.
	totalNumOfConditionsChecked := 4
	semanticallyFalseStatus := 0
	unknownStatus := 0

	message := ""
	for _, condition := range node.Status.Conditions {
		switch condition.Type {
		case corev1.NodeMemoryPressure, corev1.NodeDiskPressure, corev1.NodePIDPressure:
			if condition.Status != corev1.ConditionFalse {
				message += fmt.Sprintf("Condition %s is %s. ", condition.Type, condition.Status)
				if condition.Status == corev1.ConditionUnknown {
					unknownStatus++
					continue
				}
				semanticallyFalseStatus++
			}
		case corev1.NodeReady:
			if condition.Status != corev1.ConditionTrue {
				message += fmt.Sprintf("Condition %s is %s. ", condition.Type, condition.Status)
				if condition.Status == corev1.ConditionUnknown {
					unknownStatus++
					continue
				}
				semanticallyFalseStatus++
			}
		}
	}
	if semanticallyFalseStatus > 0 {
		return corev1.ConditionFalse, message
	}
	// No semantically-negative condition was found; report True unless all four
	// checked conditions were Unknown.
	if semanticallyFalseStatus+unknownStatus < totalNumOfConditionsChecked {
		return corev1.ConditionTrue, message
	}
	return corev1.ConditionUnknown, message
}

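// getNode lists Nodes on the workload cluster, one page at a time, and returns
// the Node whose Spec.ProviderID matches the given ProviderID, or
// ErrNodeNotFound if no such Node exists.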
func (r *MachineReconciler) getNode(ctx context.Context, c client.Reader, providerID *noderefutil.ProviderID) (*corev1.Node, error) {
	logger := r.Log.WithValues("providerID", providerID)

	// Page through the Node list until a match is found or there are no more pages.
	nodeList := corev1.NodeList{}
	for {
		if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil {
			return nil, err
		}

		for _, node := range nodeList.Items {
			nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID)
			if err != nil {
				logger.Error(err, "Failed to parse ProviderID", "node", node.Name)
				continue
			}

			if providerID.Equals(nodeProviderID) {
				return &node, nil
			}
		}

		if nodeList.Continue == "" {
			break
		}
	}
	return nil, ErrNodeNotFound
}