Commit 07f1481

Merge pull request #3790 from fabriziopandini/rework-skipped-test
🌱 Rework skipped test
2 parents c60ae8b + eb011e6

7 files changed: +82 additions, -78 deletions

bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go

Lines changed: 3 additions & 5 deletions
@@ -126,10 +126,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t *
     g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
 }
 
-// Reconcile returns an error in this case because the owning machine should not go away before the things it owns.
-func TestKubeadmConfigReconciler_Reconcile_ReturnErrorIfReferencedMachineIsNotFound(t *testing.T) {
-    t.Skip("This test doens't look correct, the reconciler returns nil if the owner isn't found")
-
+// Reconcile returns nil if the referenced Machine cannot be found.
+func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFound(t *testing.T) {
     g := NewWithT(t)
 
     machine := newMachine(nil, "machine")
@@ -152,7 +150,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnErrorIfReferencedMachineIsNotFo
         },
     }
     _, err := k.Reconcile(ctx, request)
-    g.Expect(err).To(HaveOccurred())
+    g.Expect(err).To(BeNil())
 }
 
 // If the machine has bootstrap data secret reference, there is no need to generate more bootstrap data.
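
The rename above pins down standard controller-runtime semantics: when the owning Machine is gone, NotFound ends reconciliation cleanly instead of surfacing an error, since requeuing could never make the owner reappear. A minimal sketch of that pattern, assuming the v1alpha3 API package (illustrative only, not the reconciler's actual code):

package example

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileOwner illustrates the NotFound contract: a missing owner Machine
// ends reconciliation cleanly instead of returning an error to be retried.
func reconcileOwner(ctx context.Context, c client.Client, key client.ObjectKey) (ctrl.Result, error) {
    machine := &clusterv1.Machine{}
    if err := c.Get(ctx, key, machine); err != nil {
        if apierrors.IsNotFound(err) {
            // The owner is gone; requeuing would never succeed, so return
            // nil and let the owned objects be garbage collected.
            return ctrl.Result{}, nil
        }
        // Any other error is transient and worth retrying.
        return ctrl.Result{}, err
    }
    // Owner found; normal reconciliation would continue here.
    return ctrl.Result{}, nil
}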

cmd/clusterctl/client/cluster/inventory_managementgroup_test.go

Lines changed: 5 additions & 3 deletions
@@ -28,8 +28,6 @@ import (
 )
 
 func Test_inventoryClient_GetManagementGroups(t *testing.T) {
-    t.Skip("Some of these tests now fail, because they rely on ordering to compare items, needs some rework")
-
     type fields struct {
         proxy Proxy
     }
@@ -157,7 +155,11 @@ func Test_inventoryClient_GetManagementGroups(t *testing.T) {
                 return
             }
             g.Expect(err).NotTo(HaveOccurred())
-            g.Expect(got).To(ConsistOf(tt.want))
+            g.Expect(got).To(HaveLen(len(tt.want)))
+            for i := range tt.want {
+                g.Expect(got[i].CoreProvider).To(Equal(tt.want[i].CoreProvider))
+                g.Expect(got[i].Providers).To(ConsistOf(tt.want[i].Providers))
+            }
         })
     }
 }
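
The reworked assertion is deliberately two-level: each group is compared positionally (the rework assumes groups come back in a deterministic order), while the Providers inside a group carry no ordering guarantee and go through ConsistOf, which matches elements regardless of order. A small standalone sketch of the difference, using hypothetical string slices rather than the inventory types:

package example

import (
    "testing"

    . "github.com/onsi/gomega"
)

func TestOrderSensitivity(t *testing.T) {
    g := NewWithT(t)

    got := []string{"infra-provider", "core-provider"}
    want := []string{"core-provider", "infra-provider"}

    // ConsistOf matches the same elements in any order, so this passes.
    g.Expect(got).To(ConsistOf(want))

    // Equal compares element by element, so the same data in a different
    // order would fail:
    //   g.Expect(got).To(Equal(want)) // fails
}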

cmd/clusterctl/client/cluster/mover_test.go

Lines changed: 0 additions & 6 deletions
@@ -440,8 +440,6 @@ var moveTests = []struct {
 }
 
 func Test_getMoveSequence(t *testing.T) {
-    t.Skip("A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
     // NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
     for _, tt := range moveTests {
         t.Run(tt.name, func(t *testing.T) {
@@ -474,8 +472,6 @@ func Test_getMoveSequence(t *testing.T) {
 }
 
 func Test_objectMover_move_dryRun(t *testing.T) {
-    t.Skip("A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
     // NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
     for _, tt := range moveTests {
         t.Run(tt.name, func(t *testing.T) {
@@ -550,8 +546,6 @@ func Test_objectMover_move_dryRun(t *testing.T) {
 }
 
 func Test_objectMover_move(t *testing.T) {
-    t.Skip("A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
     // NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
     for _, tt := range moveTests {
         t.Run(tt.name, func(t *testing.T) {
cmd/clusterctl/client/cluster/objectgraph_test.go

Lines changed: 7 additions & 10 deletions
@@ -101,13 +101,13 @@ type wantGraph struct {
 func assertGraph(t *testing.T, got *objectGraph, want wantGraph) {
     g := NewWithT(t)
 
-    g.Expect(len(got.uidToNode)).To(Equal(len(want.nodes)))
+    g.Expect(len(got.uidToNode)).To(Equal(len(want.nodes)), "the number of nodes in the objectGraph doesn't match the number of expected nodes")
 
     for uid, wantNode := range want.nodes {
         gotNode, ok := got.uidToNode[types.UID(uid)]
-        g.Expect(ok).To(BeTrue(), "node ", uid, " not found")
-        g.Expect(gotNode.virtual).To(Equal(wantNode.virtual))
-        g.Expect(gotNode.owners).To(HaveLen(len(wantNode.owners)))
+        g.Expect(ok).To(BeTrue(), "node %q not found", uid)
+        g.Expect(gotNode.virtual).To(Equal(wantNode.virtual), "node %q.virtual does not have the expected value", uid)
+        g.Expect(gotNode.owners).To(HaveLen(len(wantNode.owners)), "node %q.owners does not have the expected length", uid)
 
         for _, wantOwner := range wantNode.owners {
             found := false
@@ -117,10 +117,10 @@ func assertGraph(t *testing.T, got *objectGraph, want wantGraph) {
                     break
                 }
             }
-            g.Expect(found).To(BeTrue())
+            g.Expect(found).To(BeTrue(), "node %q.owners does not contain %q", uid, wantOwner)
         }
 
-        g.Expect(gotNode.softOwners).To(HaveLen(len(wantNode.softOwners)))
+        g.Expect(gotNode.softOwners).To(HaveLen(len(wantNode.softOwners)), "node %q.softOwners does not have the expected length", uid)
 
         for _, wantOwner := range wantNode.softOwners {
             found := false
@@ -130,7 +130,7 @@ func assertGraph(t *testing.T, got *objectGraph, want wantGraph) {
                     break
                 }
             }
-            g.Expect(found).To(BeTrue())
+            g.Expect(found).To(BeTrue(), "node %q.softOwners does not contain %q", uid, wantOwner)
         }
     }
 }
@@ -1017,7 +1017,6 @@ var objectGraphsTests = []struct {
             },
         },
     },
-
     {
         name: "Cluster and Global + Namespaced External Objects",
         args: objectGraphTestArgs{
@@ -1116,8 +1115,6 @@ func getFakeDiscoveryTypes(graph *objectGraph) error {
 }
 
 func TestObjectGraph_Discovery(t *testing.T) {
-    t.Skip("TestObjectGraph_Discovery/A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
     // NB. we are testing the graph is properly built starting from objects (TestGraphBuilder_addObj_WithFakeObjects) or from the same objects read from the cluster (this test).
     for _, tt := range objectGraphsTests {
         t.Run(tt.name, func(t *testing.T) {

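The annotations added above use Gomega's optional description arguments: when the first extra argument to To is a format string, the remaining arguments are rendered via fmt.Sprintf and printed next to the matcher's failure output. A self-contained sketch with a hypothetical map and uid:

package example

import (
    "testing"

    . "github.com/onsi/gomega"
)

func TestAnnotatedFailure(t *testing.T) {
    g := NewWithT(t)

    uidToNode := map[string]bool{"ns1/cluster1": true}
    uid := "ns1/cluster1"

    _, ok := uidToNode[uid]
    // On failure, Gomega prints `node "ns1/cluster1" not found` before the
    // matcher output, identifying the offending node without a debugger.
    g.Expect(ok).To(BeTrue(), "node %q not found", uid)
}
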
cmd/clusterctl/internal/test/fake_objects.go

Lines changed: 6 additions & 12 deletions
@@ -1152,21 +1152,15 @@ func SelectClusterObj(objs []client.Object, namespace, name string) *clusterv1.C
             continue
         }
 
-        accessor, err := meta.Accessor(o)
-        if err != nil {
-            panic(fmt.Sprintf("failed to get accessor for %s: %v", o.GetObjectKind(), err))
-        }
-
-        if accessor.GetName() == name && accessor.GetNamespace() == namespace {
-            cluster := &clusterv1.Cluster{
-                TypeMeta: metav1.TypeMeta{
-                    APIVersion: clusterv1.GroupVersion.String(),
-                    Kind:       "Cluster",
-                },
-            }
+        if o.GetName() == name && o.GetNamespace() == namespace {
+            // Convert the object to a Cluster.
+            // NB. Convert returns an object without version/kind, so we are enforcing those values back.
+            cluster := &clusterv1.Cluster{}
             if err := FakeScheme.Convert(o, cluster, nil); err != nil {
                 panic(fmt.Sprintf("failed to convert %s to cluster: %v", o.GetObjectKind(), err))
             }
+            cluster.APIVersion = o.GetObjectKind().GroupVersionKind().GroupVersion().String()
+            cluster.Kind = o.GetObjectKind().GroupVersionKind().Kind
             return cluster
         }
     }
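
Two details make this rework possible: client.Object already exposes GetName/GetNamespace, so the meta.Accessor indirection was unnecessary, and scheme.Convert leaves the target's apiVersion/kind empty, so they must be restored from the source's GroupVersionKind. A condensed sketch of the convert-then-restore step, assuming the v1alpha3 Cluster type and any scheme that registers both types:

package example

import (
    "k8s.io/apimachinery/pkg/runtime"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

// convertToCluster converts a typed object into a *clusterv1.Cluster and
// restores the TypeMeta that scheme.Convert leaves empty on the target.
func convertToCluster(scheme *runtime.Scheme, o runtime.Object) (*clusterv1.Cluster, error) {
    cluster := &clusterv1.Cluster{}
    if err := scheme.Convert(o, cluster, nil); err != nil {
        return nil, err
    }
    // Convert drops version/kind, so re-derive them from the source object.
    gvk := o.GetObjectKind().GroupVersionKind()
    cluster.APIVersion = gvk.GroupVersion().String()
    cluster.Kind = gvk.Kind
    return cluster, nil
}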

controllers/cluster_controller_test.go

Lines changed: 4 additions & 6 deletions
@@ -194,9 +194,7 @@ var _ = Describe("Cluster Reconciler", func() {
         }, timeout).Should(BeTrue())
     })
 
-    It("Should successfully patch a cluster object if only removing finalizers", func() {
-        Skip("This test doesn't look correct, if we remove the finalizer the reconciler takes care of re-adding it")
-
+    It("Should re-apply finalizers if removed", func() {
         // Setup
         cluster := &clusterv1.Cluster{
             ObjectMeta: metav1.ObjectMeta{
@@ -219,7 +217,7 @@ var _ = Describe("Cluster Reconciler", func() {
             return len(cluster.Finalizers) > 0
         }, timeout).Should(BeTrue())
 
-        // Patch
+        // Remove finalizers
         Eventually(func() bool {
             ph, err := patch.NewHelper(cluster, testEnv)
             Expect(err).ShouldNot(HaveOccurred())
@@ -230,14 +228,14 @@ var _ = Describe("Cluster Reconciler", func() {
 
         Expect(cluster.Finalizers).Should(BeEmpty())
 
-        // Assertions
+        // Check finalizers are re-applied
         Eventually(func() []string {
             instance := &clusterv1.Cluster{}
             if err := testEnv.Get(ctx, key, instance); err != nil {
                 return []string{"not-empty"}
             }
             return instance.Finalizers
-        }, timeout).Should(BeEmpty())
+        }, timeout).ShouldNot(BeEmpty())
     })
 
     It("Should successfully set Status.ControlPlaneInitialized on the cluster object if controlplane is ready", func() {

controlplane/kubeadm/internal/workload_cluster_coredns_test.go

Lines changed: 57 additions & 36 deletions
@@ -20,6 +20,7 @@ import (
     "testing"
 
     . "github.com/onsi/gomega"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
 
     "github.com/pkg/errors"
     appsv1 "k8s.io/api/apps/v1"
@@ -36,8 +37,6 @@ import (
 )
 
 func TestUpdateCoreDNS(t *testing.T) {
-    t.Skip("This now fails because it's using Update instead of patch, needs rework")
-
     validKCP := &controlplanev1.KubeadmControlPlane{
         Spec: controlplanev1.KubeadmControlPlaneSpec{
             KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{
@@ -284,44 +283,66 @@ kind: ClusterConfiguration
         },
     }
 
+    // We are using testEnv as a workload cluster, and given that each test case assumes well-known objects with specific
+    // Namespace/Name (e.g. the CoreDNS ConfigMap & Deployment, the kubeadm ConfigMap), it is not possible to run the use cases in parallel.
     for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            g := NewWithT(t)
-            fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, tt.objs...)
-            w := &Workload{
-                Client:          fakeClient,
-                CoreDNSMigrator: tt.migrator,
-            }
-            err := w.UpdateCoreDNS(ctx, tt.kcp)
-            if tt.expectErr {
-                g.Expect(err).To(HaveOccurred())
-                return
-            }
-            g.Expect(err).ToNot(HaveOccurred())
+        g := NewWithT(t)
+        t.Log(tt.name)
+
+        for _, o := range tt.objs {
+            // NB. deep copy the test object so changes applied during a test do not affect other tests.
+            o := o.DeepCopyObject().(client.Object)
+            g.Expect(testEnv.Create(ctx, o)).To(Succeed())
+        }
 
-            // Assert that CoreDNS updates have been made
-            if tt.expectUpdates {
-                // assert kubeadmConfigMap
-                var expectedKubeadmConfigMap corev1.ConfigMap
-                g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed())
-                g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("1.7.2")))
-                g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("k8s.gcr.io/some-repo")))
-
-                // assert CoreDNS corefile
-                var expectedConfigMap corev1.ConfigMap
-                g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed())
-                g.Expect(expectedConfigMap.Data).To(HaveLen(2))
-                g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file"))
-                g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", expectedCorefile))
-
-                // assert CoreDNS deployment
-                var actualDeployment appsv1.Deployment
-                g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed())
-                // ensure the image is updated and the volumes point to the corefile
-                g.Expect(actualDeployment.Spec.Template.Spec.Containers[0].Image).To(Equal("k8s.gcr.io/some-repo/coredns:1.7.2"))
+        w := &Workload{
+            Client:          testEnv.GetClient(),
+            CoreDNSMigrator: tt.migrator,
+        }
+        err := w.UpdateCoreDNS(ctx, tt.kcp)
+        if tt.expectErr {
+            g.Expect(err).To(HaveOccurred())
+            return
+        }
+        g.Expect(err).ToNot(HaveOccurred())
+
+        // Assert that CoreDNS updates have been made
+        if tt.expectUpdates {
+            // assert kubeadmConfigMap
+            var expectedKubeadmConfigMap corev1.ConfigMap
+            g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed())
+            g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("1.7.2")))
+            g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("k8s.gcr.io/some-repo")))
 
+            // assert CoreDNS corefile
+            var expectedConfigMap corev1.ConfigMap
+            g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed())
+            g.Expect(expectedConfigMap.Data).To(HaveLen(2))
+            g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file"))
+            g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", expectedCorefile))
+
+            // assert CoreDNS deployment
+            var actualDeployment appsv1.Deployment
+            g.Eventually(func() string {
+                g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed())
+                return actualDeployment.Spec.Template.Spec.Containers[0].Image
+            }, "5s").Should(Equal("k8s.gcr.io/some-repo/coredns:1.7.2"))
+        }
+
+        // Cleanup test objects (and wait for deletion to complete).
+        testEnv.Cleanup(ctx, tt.objs...)
+        g.Eventually(func() bool {
+            for _, o := range []client.Object{cm, depl, kubeadmCM} {
+                // NB. deep copy the test object so changes applied during a test do not affect other tests.
+                o := o.DeepCopyObject().(client.Object)
+                key, _ := client.ObjectKeyFromObject(o)
+                err := testEnv.Get(ctx, key, o)
+                if err == nil || (err != nil && !apierrors.IsNotFound(err)) {
+                    return false
+                }
             }
-        })
+            return true
+        }, "10s").Should(BeTrue())
     }
 }

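Because testEnv is shared and every case creates objects under the same well-known Namespace/Name, cleanup has to be confirmed complete before the next case starts. A condensed sketch of that deletion wait (the helper name is hypothetical); since apierrors.IsNotFound(nil) is false, a single check covers both the still-exists and unexpected-error cases that the diff spells out as two branches:

package example

import (
    "context"

    . "github.com/onsi/gomega"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForDeletion polls until every object in objs is confirmed gone.
func waitForDeletion(ctx context.Context, g *WithT, c client.Client, objs ...client.Object) {
    g.Eventually(func() bool {
        for _, o := range objs {
            // Work on a copy so the Get does not mutate the caller's object.
            o := o.DeepCopyObject().(client.Object)
            key := client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()}
            // IsNotFound(nil) is false, so this single check treats both
            // "still exists" and "unexpected error" as not-yet-deleted.
            if err := c.Get(ctx, key, o); !apierrors.IsNotFound(err) {
                return false
            }
        }
        return true
    }, "10s").Should(BeTrue())
}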