@@ -18,7 +18,6 @@ package cluster
 
 import (
 	"fmt"
-	"sync"
 	"time"
 
 	"github.com/go-logr/logr"
@@ -237,36 +236,20 @@ const (
 
 // createGroup creates all the Kubernetes objects into the target management cluster corresponding to the object graph nodes in a moveGroup.
 func (o *objectMover) createGroup(group moveGroup, toProxy Proxy) error {
-
-	// Creates - in parallel - all the nodes in the group.
-	var wg sync.WaitGroup
 	errList := []error{}
-	errCh := make(chan error)
-	defer close(errCh)
-
-	go func() {
-		for e := range errCh {
-			errList = append(errList, e)
+	for i := range group {
+		nodeToCreate := group[i]
+
+		// Creates the Kubernetes object corresponding to the nodeToCreate.
+		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
+		err := retry(retryCreateTargetObject, retryIntervalCreateTargetObject, o.log, func() error {
+			return o.createTargetObject(nodeToCreate, toProxy)
+		})
+		if err != nil {
+			errList = append(errList, err)
 		}
-	}()
-
-	for _, nodeToCreate := range group {
-		wg.Add(1)
-		go func(node *node) {
-			defer wg.Done()
-
-			// Creates the Kubernetes object corresponding to the nodeToCreate.
-			// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
-			err := retry(retryCreateTargetObject, retryIntervalCreateTargetObject, o.log, func() error {
-				return o.createTargetObject(node, toProxy)
-			})
-			if err != nil {
-				errCh <- err
-			}
-		}(nodeToCreate)
 	}
 
-	wg.Wait()
 	if len(errList) > 0 {
 		return kerrors.NewAggregate(errList)
 	}
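Both hunks funnel each operation through the package's retry helper, which is not part of this diff. The sketch below is a minimal plausible shape for it, inferred from the call sites: it assumes the first argument is an attempt count, the second a fixed delay between attempts, and that the logger is a logr.Logger (consistent with the go-logr import above). It is an illustration, not the actual implementation.

package cluster

import (
	"time"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
)

// retry runs action up to attempts times, sleeping interval between
// failed attempts, and returns the last error if every attempt fails.
// Hypothetical sketch; the real helper lives elsewhere in this package.
func retry(attempts int, interval time.Duration, log logr.Logger, action func() error) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		if err := action(); err != nil {
			lastErr = err
			log.V(5).Info("operation failed, retrying", "attempt", i+1, "error", err.Error())
			time.Sleep(interval)
			continue
		}
		return nil
	}
	return errors.Wrapf(lastErr, "action failed after %d attempts", attempts)
}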
@@ -372,37 +355,21 @@ const (
 
 // deleteGroup deletes all the Kubernetes objects from the source management cluster corresponding to the object graph nodes in a moveGroup.
 func (o *objectMover) deleteGroup(group moveGroup) error {
-	// Deletes - in parallel - the dependents nodes and the softDependents nodes (with the respective object tree).
-	var wg sync.WaitGroup
 	errList := []error{}
-	errCh := make(chan error)
-	defer close(errCh)
-
-	go func() {
-		for e := range errCh {
-			errList = append(errList, e)
-		}
-	}()
+	for i := range group {
+		nodeToDelete := group[i]
 
-	for _, nodeToDelete := range group {
-		wg.Add(1)
-		go func(node *node) {
-			defer wg.Done()
+		// Delete the Kubernetes object corresponding to the current node.
+		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
+		err := retry(retryDeleteSourceObject, retryIntervalDeleteSourceObject, o.log, func() error {
+			return o.deleteSourceObject(nodeToDelete)
+		})
 
-			// Delete the Kubernetes object corresponding to the current node.
-			// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
-			err := retry(retryDeleteSourceObject, retryIntervalDeleteSourceObject, o.log, func() error {
-				return o.deleteSourceObject(node)
-			})
-
-			if err != nil {
-				errCh <- err
-			}
-
-		}(nodeToDelete)
+		if err != nil {
+			errList = append(errList, err)
+		}
 	}
 
-	wg.Wait()
 	return kerrors.NewAggregate(errList)
 }
 
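One detail worth noting: deleteGroup returns kerrors.NewAggregate(errList) unconditionally, while createGroup guards the same call with a length check. Assuming kerrors is the usual alias for k8s.io/apimachinery/pkg/util/errors, the two are equivalent, because NewAggregate returns nil for an empty list, as the small standalone example below demonstrates.

package main

import (
	"errors"
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// An empty (or nil) list aggregates to a nil error, so returning
	// NewAggregate(errList) directly on the success path is safe.
	fmt.Println(kerrors.NewAggregate(nil) == nil) // true

	agg := kerrors.NewAggregate([]error{errors.New("a"), errors.New("b")})
	fmt.Println(agg.Error()) // the messages combined, e.g. "[a, b]"
}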