@@ -198,8 +198,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, err
 	}
 
-	log := ctrl.LoggerFrom(ctx).WithValues("Cluster", klog.KRef(m.Namespace, m.Spec.ClusterName))
-	ctx = ctrl.LoggerInto(ctx, log)
+	ctx = ctrl.LoggerInto(ctx, ctrl.LoggerFrom(ctx).WithValues("Cluster", klog.KRef(m.Namespace, m.Spec.ClusterName)))
 
 	// Add finalizer first if not set to avoid the race condition between init and delete.
 	if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, m, clusterv1.MachineFinalizer); err != nil || finalizerAdded {
@@ -208,7 +207,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 
 	// AddOwners adds the owners of Machine as k/v pairs to the logger.
 	// Specifically, it will add KubeadmControlPlane, MachineSet and MachineDeployment.
-	ctx, log, err := clog.AddOwners(ctx, r.Client, m)
+	ctx, _, err := clog.AddOwners(ctx, r.Client, m)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -273,23 +272,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 			r.reconcileDelete,
 		)
 
-		res, err := doReconcile(ctx, reconcileDelete, s)
-		// Requeue if the reconcile failed because connection to workload cluster was down.
-		if errors.Is(err, clustercache.ErrClusterNotConnected) {
-			log.V(5).Info("Requeuing because connection to the workload cluster is down")
-			return ctrl.Result{RequeueAfter: time.Minute}, nil
-		}
-		return res, err
+		return doReconcile(ctx, reconcileDelete, s)
 	}
 
 	// Handle normal reconciliation loop.
-	res, err := doReconcile(ctx, alwaysReconcile, s)
-	// Requeue if the reconcile failed because connection to workload cluster was down.
-	if errors.Is(err, clustercache.ErrClusterNotConnected) {
-		log.V(5).Info("Requeuing because connection to the workload cluster is down")
-		return ctrl.Result{RequeueAfter: time.Minute}, nil
-	}
-	return res, err
+	return doReconcile(ctx, alwaysReconcile, s)
 }
 
 func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clusterv1.Machine, options ...patch.Option) error {
@@ -816,14 +803,10 @@ func (r *Reconciler) drainNode(ctx context.Context, s *scope) (ctrl.Result, erro
 
 	remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
 	if err != nil {
-		if errors.Is(err, clustercache.ErrClusterNotConnected) {
-			log.V(5).Info("Requeuing drain Node because connection to the workload cluster is down")
-			s.deletingReason = clusterv1.MachineDeletingDrainingNodeV1Beta2Reason
-			s.deletingMessage = "Requeuing drain Node because connection to the workload cluster is down"
-			return ctrl.Result{RequeueAfter: time.Minute}, nil
-		}
-		log.Error(err, "Error creating a remote client for cluster while draining Node, won't retry")
-		return ctrl.Result{}, nil
+		log.V(5).Info("Waiting for Cluster connection to come up to drain the Node")
+		s.deletingReason = clusterv1.MachineDeletingDrainingNodeV1Beta2Reason
+		s.deletingMessage = "Waiting for Cluster connection to come up to drain the Node"
+		return ctrl.Result{}, err
 	}
 
 	node := &corev1.Node{}
@@ -989,15 +972,9 @@ func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, s *scope) (ct
 }
 
 func (r *Reconciler) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, name string) error {
-	log := ctrl.LoggerFrom(ctx)
-
 	remoteClient, err := r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster))
 	if err != nil {
-		if errors.Is(err, clustercache.ErrClusterNotConnected) {
-			return errors.Wrapf(err, "failed deleting Node because connection to the workload cluster is down")
-		}
-		log.Error(err, "Error creating a remote client for cluster while deleting Node, won't retry")
-		return nil
+		return errors.Wrapf(err, "failed deleting Node because connection to the workload cluster is down")
 	}
 
 	node := &corev1.Node{