Commit 463576e

mco-1595: fix cleanup on error or completion

1 parent cca6a6c

2 files changed: +29 -46 lines changed

test/extended/machine_config/helpers.go (+4 -6)

@@ -383,21 +383,19 @@ func isMCDDone(node corev1.Node) bool {
     return state == "Done"
 }
 
-// WaitForMCPToBeReady waits for a pool to be in an updated state with more than one ready machine
-func WaitForMCPToBeReady(oc *exutil.CLI, poolName string) error {
-    machineConfigClient, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
-    o.Expect(err).NotTo(o.HaveOccurred())
+// `WaitForMCPToBeReady` waits for a pool to be in an updated state with a specified number of ready machines
+func WaitForMCPToBeReady(oc *exutil.CLI, machineConfigClient *machineconfigclient.Clientset, poolName string, readyMachineCount int32) error {
     o.Eventually(func() bool {
         mcp, err := machineConfigClient.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{})
         if err != nil {
             framework.Logf("Failed to grab machineconfigpool %v, error :%v", poolName, err)
             return false
         }
         // Check if the pool is in an updated state with the correct number of ready machines
-        if IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) && mcp.Status.UpdatedMachineCount > 0 {
+        if IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) && mcp.Status.UpdatedMachineCount == readyMachineCount {
             return true
         }
-        framework.Logf("Waiting for %v MCP to be updated with ready machines.", poolName)
+        framework.Logf("Waiting for %v MCP to be updated with %v ready machines.", poolName, readyMachineCount)
         return false
     }, 5*time.Minute, 10*time.Second).Should(o.BeTrue())
     return nil
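
Reviewer note: the helper now asserts an exact ready-machine count instead of the old "anything greater than zero" check, and reuses a caller-supplied clientset rather than building its own on every call. A minimal sketch of the new calling pattern, mirroring the call sites added in machine_config_node.go below (`oc` and the `custom` pool name come from the surrounding suite; the helper still always returns nil, since the o.Eventually(...).Should(...) inside does the actual asserting):

    // Build one clientset up front and thread it through every wait.
    clientSet, err := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
    o.Expect(err).NotTo(o.HaveOccurred())

    // Block until the custom pool reports Updated with exactly one ready machine.
    WaitForMCPToBeReady(oc, clientSet, custom, 1)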

test/extended/machine_config/machine_config_node.go (+25 -40)

@@ -6,7 +6,7 @@ import (
     "path/filepath"
     "time"
 
-    mcClient "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
+    machineconfigclient "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
     exutil "github.com/openshift/origin/test/extended/util"
 
     g "github.com/onsi/ginkgo/v2"
@@ -34,12 +34,6 @@ var _ = g.Describe("[sig-mco][OCPFeatureGate:MachineConfigNode][Serial]", func()
         oc = exutil.NewCLIWithoutNamespace("machine-config")
     )
 
-    // TODO: Update to properly cleanup after tests
-    // g.AfterAll(func(ctx context.Context) {
-    //     // clean up the created custom MCP
-    //     CleanupCustomMCP(oc)
-    // })
-
     g.It("Should have MCN properties matching associated node properties [apigroup:machineconfiguration.openshift.io]", func() {
         ValidateMCNProperties(oc, infraMCPFixture)
     })
@@ -52,7 +46,7 @@
 // `ValidateMCNProperties` checks that MCN properties match the corresponding node properties
 func ValidateMCNProperties(oc *exutil.CLI, fixture string) {
     // Create client set for test
-    clientSet, clientErr := mcClient.NewForConfig(oc.KubeFramework().ClientConfig())
+    clientSet, clientErr := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
     o.Expect(clientErr).NotTo(o.HaveOccurred())
 
     // Grab a random node from each default pool
@@ -99,8 +93,29 @@ func ValidateMCNProperties(oc *exutil.CLI, fixture string) {
     labelErr := oc.Run("label").Args(fmt.Sprintf("node/%s", workerNode.Name), fmt.Sprintf("node-role.kubernetes.io/%s=", custom)).Execute()
     o.Expect(labelErr).NotTo(o.HaveOccurred())
 
+    defer func() {
+        // Get starting state of default worker MCP
+        workerMcp, err := clientSet.MachineconfigurationV1().MachineConfigPools().Get(context.TODO(), worker, metav1.GetOptions{})
+        o.Expect(err).NotTo(o.HaveOccurred())
+        workerMcpReadyMachines := workerMcp.Status.ReadyMachineCount
+
+        // Unlabel node
+        framework.Logf("Removing label node-role.kubernetes.io/%v from node %v", custom, workerNode.Name)
+        unlabelErr := oc.Run("label").Args(fmt.Sprintf("node/%s", workerNode.Name), fmt.Sprintf("node-role.kubernetes.io/%s-", custom)).Execute()
+        o.Expect(unlabelErr).NotTo(o.HaveOccurred())
+
+        // Wait for infra pool to report no nodes & for worker MCP to be ready
+        WaitForMCPToBeReady(oc, clientSet, custom, 0)
+        WaitForMCPToBeReady(oc, clientSet, worker, workerMcpReadyMachines+1)
+
+        // Delete custom MCP
+        framework.Logf("Deleting MCP %v", custom)
+        deleteMCPErr := oc.Run("delete").Args("mcp", custom).Execute()
+        o.Expect(deleteMCPErr).NotTo(o.HaveOccurred())
+    }()
+
     // Wait for the custom pool to be updated with the node ready
-    WaitForMCPToBeReady(oc, custom)
+    WaitForMCPToBeReady(oc, clientSet, custom, 1)
 
     // Get node desired and current config versions
     customNodes, customNodeErr := GetNodesByRole(oc, custom)
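
Reviewer note: the defer above is what makes cleanup run "on error or completion", per the commit title. Gomega's default fail handler calls ginkgo.Fail, which panics to abort the spec, and deferred functions still execute during that unwind, so the node is unlabeled and the custom MCP deleted whether the test finishes cleanly or fails at any o.Expect along the way. A tiny self-contained illustration of the mechanism (plain Go, not from the commit):

    package main

    import "fmt"

    func spec() {
        // Registered before the risky steps, just like the test's cleanup.
        defer fmt.Println("cleanup: unlabel node, delete custom MCP")
        panic("assertion failed") // stands in for a failing o.Expect(...)
    }

    func main() {
        defer func() { recover() }() // the test runner recovers similarly
        spec()                       // prints the cleanup line even though spec panics
    }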
@@ -134,7 +149,7 @@ func ValidateMCNProperties(oc *exutil.CLI, fixture string) {
 // `ValidateMCNConditionTransitions` check that Conditions properly update on a node update
 func ValidateMCNConditionTransitions(oc *exutil.CLI, fixture string) {
     // Create client set for test
-    clientSet, clientErr := mcClient.NewForConfig(oc.KubeFramework().ClientConfig())
+    clientSet, clientErr := machineconfigclient.NewForConfig(oc.KubeFramework().ClientConfig())
     o.Expect(clientErr).NotTo(o.HaveOccurred())
 
     // Apply MC targeting worker pool
@@ -207,33 +222,3 @@ func ValidateMCNConditionTransitions(oc *exutil.CLI, fixture string) {
     framework.Logf("Checking all conditions other than 'Updated' are False.")
     o.Expect(confirmUpdatedMCNStatus(clientSet, workerNode.Name)).Should(o.BeTrue())
 }
-
-// TODO: test this cleanup works when running full test
-// `CleanupCustomMCP` deletes the custom MCP for the MCN tests
-func CleanupCustomMCP(oc *exutil.CLI) {
-    // TODO: add length check to see if any nodes are labeled with custom role
-    // TODO: add check if mcp exists before trying to delete it
-
-    // Remove custom role from nodes
-    customNodes, customNodeErr := GetNodesByRole(oc, custom)
-    o.Expect(customNodeErr).NotTo(o.HaveOccurred())
-    for _, node := range customNodes {
-        framework.Logf("Unlabeling node %v", node.Name)
-        unlabelErr := oc.Run("label").Args(fmt.Sprintf("node/%s", node.Name), fmt.Sprintf("node-role.kubernetes.io/%s-", custom)).Execute()
-        o.Expect(unlabelErr).NotTo(o.HaveOccurred())
-    }
-
-    // Wait for worker MCP to be updated
-    // TODO: fix this since it seemes to not wait long enough to actually catch the mcp needing an update and being updated
-    // TODO: maybe check the node annotations instead?
-    // TODO: Maybe update WaitForMCPToBeReady to take an int again but have it be a number representing the previous number of machines in the pool? so that the updated can also chek if ready machine count is greater than the previous count.
-    framework.Logf("Waiting for worker MCP to re-sync.")
-    WaitForMCPToBeReady(oc, worker)
-
-    // Delete custom MCP
-    framework.Logf("Deleting MCP %v", custom)
-    deleteMCPErr := oc.Run("delete").Args("mcp", custom).Execute()
-    o.Expect(deleteMCPErr).NotTo(o.HaveOccurred())
-
-    framework.Logf("Custom MCP %v has been cleaned up.", custom)
-}
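
Reviewer note: with cleanup inlined as a defer, the standalone CleanupCustomMCP (and the commented-out g.AfterAll that was meant to call it) can be deleted outright. Its own TODOs name the underlying race: the old wait returned as soon as the worker pool was Updated with any machines, which could happen before the unlabeled node had actually rejoined. The exact-count predicate is what closes that gap. Side by side as a sketch (`updated` stands for the MachineConfigPoolUpdated condition check; these closures are illustrative, not code from the commit):

    oldReady := func(updated bool, count int32) bool {
        return updated && count > 0 // any nonzero count passes: racy mid-resync
    }
    newReady := func(updated bool, count, expected int32) bool {
        return updated && count == expected // waits for the real target count
    }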
