@@ -38,6 +38,7 @@ import (
38
38
"k8s.io/klog/v2"
39
39
operatorv1alpha1 "sigs.k8s.io/cluster-api-operator/api/v1alpha1"
40
40
operatorv1 "sigs.k8s.io/cluster-api-operator/api/v1alpha2"
41
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
41
42
42
43
. "sigs.k8s.io/cluster-api-operator/test/framework"
43
44
"sigs.k8s.io/cluster-api/test/framework"
@@ -294,7 +295,7 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *
294
295
295
296
// initHelmCluster prepares a management cluster for the Helm-based e2e tests:
// it creates a per-cluster log folder under artifactFolder and installs
// cert-manager. Unlike initBootstrapCluster it does not initialize any CAPI
// providers on the cluster.
func initHelmCluster(clusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig) {
	Expect(clusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling initHelmCluster")
	// Derive the log folder from the proxy that was actually passed in, not
	// from a package-level variable, so the function works for any cluster.
	logFolder := filepath.Join(artifactFolder, "clusters", clusterProxy.GetName())
	Expect(os.MkdirAll(logFolder, 0750)).To(Succeed(), "Invalid argument. Log folder can't be created for initHelmCluster")
	ensureCertManager(clusterProxy, config)
}
@@ -374,6 +375,9 @@ var _ = SynchronizedAfterSuite(func() {
374
375
}, func () {
375
376
// After all ParallelNodes.
376
377
378
+ dumpClusterLogs (bootstrapClusterProxy )
379
+ dumpClusterLogs (helmClusterProxy )
380
+
377
381
By ("Tearing down the management clusters" )
378
382
if ! skipCleanup {
379
383
tearDown (bootstrapClusterProvider , bootstrapClusterProxy )
@@ -389,3 +393,40 @@ func tearDown(clusterProvider bootstrap.ClusterProvider, clusterProxy framework.
389
393
clusterProvider .Dispose (ctx )
390
394
}
391
395
}
396
+
397
+ func dumpClusterLogs (clusterProxy framework.ClusterProxy ) {
398
+ if clusterProxy == nil {
399
+ return
400
+ }
401
+
402
+ clusterLogCollector := clusterProxy .GetLogCollector ()
403
+ if clusterLogCollector == nil {
404
+ return
405
+ }
406
+
407
+ nodes , err := clusterProxy .GetClientSet ().CoreV1 ().Nodes ().List (ctx , metav1.ListOptions {})
408
+ if err != nil {
409
+ fmt .Printf ("Failed to get nodes for the bootstrap cluster: %v\n " , err )
410
+ return
411
+ }
412
+
413
+ for i := range nodes .Items {
414
+ nodeName := nodes .Items [i ].GetName ()
415
+ err = clusterLogCollector .CollectMachineLog (
416
+ ctx ,
417
+ clusterProxy .GetClient (),
418
+ // The bootstrap cluster is not expected to be a CAPI cluster, so in order to re-use the logCollector,
419
+ // we create a fake machine that wraps the node.
420
+ // NOTE: This assumes a naming convention between machines and nodes, which e.g. applies to the bootstrap clusters generated with kind.
421
+ // This might not work if you are using an existing bootstrap cluster provided by other means.
422
+ & clusterv1.Machine {
423
+ Spec : clusterv1.MachineSpec {ClusterName : nodeName },
424
+ ObjectMeta : metav1.ObjectMeta {Name : nodeName },
425
+ },
426
+ filepath .Join (artifactFolder , "clusters" , bootstrapClusterProxy .GetName (), "machines" , nodeName ),
427
+ )
428
+ if err != nil {
429
+ fmt .Printf ("Failed to get logs for the bootstrap cluster node %s: %v\n " , nodeName , err )
430
+ }
431
+ }
432
+ }
0 commit comments