Commit ca42cf4

e2e/storage: test provisioned volume on multiple nodes
For drivers that weren't locked onto a single node, it was previously random whether the read test after writing ran on the same node. Now it is deterministic: the read always happens on the node the writer ran on. Reading on a different node is covered by a separate test that only runs for configurations which support it (driver not locked onto a single node, more than one node in the test cluster). As before, TestConfig.ClientNodeSelector is ignored by the provisioning testsuite.
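
For orientation before the diff: driver tests wire these helpers into a StorageClassTest.PvCheck callback. The sketch below is illustrative only and not part of this commit; it assumes the k8s.io/kubernetes/test/e2e/storage/testsuites import path, a clientset c supplied by the surrounding test, and the helper name pvCheckSketch is made up here.

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// pvCheckSketch (hypothetical helper) returns a PvCheck callback that uses the
// renamed single-node check, or the new multi-node check when requested.
func pvCheckSketch(c clientset.Interface, multiNode bool) func(*v1.PersistentVolumeClaim, *v1.PersistentVolume) {
	return func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
		if multiNode {
			// Write on one node, read on a different one; requires more than
			// one schedulable node and a driver not locked onto a single node.
			testsuites.PVMultiNodeCheck(c, claim, volume, testsuites.NodeSelection{})
			return
		}
		// Write and read back on the same node; the scheduler picks the node
		// for the writer and the reader is pinned to it by name.
		testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
	}
}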
1 parent 54d8f16 commit ca42cf4

3 files changed (+125 -22 lines)

test/e2e/storage/regional_pd.go (+2 -2)
@@ -115,7 +115,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 				err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
 				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
 
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -137,7 +137,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 				err = verifyZonesInPV(volume, zones, false /* match */)
 				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
 
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 	}

test/e2e/storage/testsuites/provisioning.go (+111 -8)
@@ -182,7 +182,7 @@ type provisioningTestInput struct {
 func testProvisioning(input *provisioningTestInput) {
 	// common checker for most of the test cases below
 	pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-		PVWriteReadCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+		PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
 	}
 
 	It("should provision storage with defaults", func() {
@@ -200,6 +200,25 @@ func testProvisioning(input *provisioningTestInput) {
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
 
+	It("should access volume from different nodes", func() {
+		// The assumption is that if the test hasn't been
+		// locked onto a single node, then the driver is
+		// usable on all of them *and* supports accessing a volume
+		// from any node.
+		if input.nodeName != "" {
+			framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
+		}
+		// Ensure that we actually have more than one node.
+		nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
+		if len(nodes.Items) <= 1 {
+			framework.Skipf("need more than one node - skipping")
+		}
+		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+		}
+		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+	})
+
 	It("should create and delete block persistent volumes", func() {
 		if !input.dInfo.Capabilities[CapBlock] {
 			framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
@@ -317,16 +336,20 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
 	return pv
 }
 
-// PVWriteReadCheck checks that a PV retains data.
+// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
 //
 // It starts two pods:
-// - The first writes 'hello word' to the /mnt/test (= the volume).
-// - The second one runs grep 'hello world' on /mnt/test.
+// - The first pod writes 'hello word' to the /mnt/test (= the volume) on one node.
+// - The second pod runs grep 'hello world' on /mnt/test on the same node.
+//
+// The node is selected by Kubernetes when scheduling the first
+// pod. It's then selected via its name for the second pod.
+//
 // If both succeed, Kubernetes actually allocated something that is
 // persistent across pods.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
 	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
 	// We give the first pod the secondary responsibility of checking the volume has
@@ -336,11 +359,91 @@ func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
 	}
 	command += " || (mount | grep 'on /mnt/test'; false)"
-	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
+	pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
+	defer func() {
+		// pod might be nil now.
+		StopPod(client, pod)
+	}()
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred(), "get pod")
+	actualNodeName := runningPod.Spec.NodeName
+	StopPod(client, pod)
+	pod = nil // Don't stop twice.
+
+	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
+	command = "grep 'hello world' /mnt/test/data"
+	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, NodeSelection{Name: actualNodeName})
+}
+
+// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
+//
+// It starts these pods:
+// - The first pod writes 'hello word' to the /mnt/test (= the volume) on one node.
+// - The second pod runs grep 'hello world' on /mnt/test on another node.
+//
+// The first node is selected by Kubernetes when scheduling the first pod. The second pod uses the same criteria, except that a special anti-affinity
+// for the first node gets added. This test can only pass if the cluster has more than one
+// suitable node. The caller has to ensure that.
+//
+// If all succeeds, Kubernetes actually allocated something that is
+// persistent across pods and across nodes.
+//
+// This is a common test that can be called from a StorageClassTest.PvCheck.
+func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+	Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
+
+	var pod *v1.Pod
+	defer func() {
+		// passing pod = nil is okay.
+		StopPod(client, pod)
+	}()
+
+	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+	command := "echo 'hello world' > /mnt/test/data"
+	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred(), "get pod")
+	actualNodeName := runningPod.Spec.NodeName
+	StopPod(client, pod)
+	pod = nil // Don't stop twice.
+
+	// Add node-anti-affinity.
+	secondNode := node
+	if secondNode.Affinity == nil {
+		secondNode.Affinity = &v1.Affinity{}
+	}
+	if secondNode.Affinity.NodeAffinity == nil {
+		secondNode.Affinity.NodeAffinity = &v1.NodeAffinity{}
+	}
+	if secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+		secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
+	}
+	secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
+		v1.NodeSelectorTerm{
+			// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity warns
+			// that "the value of kubernetes.io/hostname may be the same as the Node name in some environments and a different value in other environments".
+			// So this might be cleaner:
+			// MatchFields: []v1.NodeSelectorRequirement{
+			//	{Key: "name", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
+			// },
+			// However, "name", "Name", "ObjectMeta.Name" all got rejected with "not a valid field selector key".
+
+			MatchExpressions: []v1.NodeSelectorRequirement{
+				{Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
+			},
+		})
 
-	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %+v", node))
+	By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
 	command = "grep 'hello world' /mnt/test/data"
-	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, node)
+	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred(), "get pod")
+	Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node")
+	StopPod(client, pod)
+	pod = nil
 }
 
 func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
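
The scheduling side of PVMultiNodeCheck boils down to a single NodeSelectorTerm that excludes the writer's node via its kubernetes.io/hostname label. Below is a minimal standalone sketch of that construction, assuming only the core/v1 API types; the function name and package name are placeholders, not part of this commit.

package example

import (
	v1 "k8s.io/api/core/v1"
)

// antiAffinityForNode builds the same kind of node anti-affinity that
// PVMultiNodeCheck appends before starting the reader pod: schedule anywhere
// except the node with the given hostname label.
func antiAffinityForNode(nodeName string) *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						// kubernetes.io/hostname may differ from the Node object's
						// name in some environments (see the comment in the diff above).
						MatchExpressions: []v1.NodeSelectorRequirement{
							{Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{nodeName}},
						},
					},
				},
			},
		},
	}
}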

test/e2e/storage/volume_provisioning.go (+12 -12)
@@ -276,7 +276,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkGCEPD(volume, "pd-ssd")
 				Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -291,7 +291,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkGCEPD(volume, "pd-standard")
 				Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		// AWS
@@ -308,7 +308,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkAWSEBS(volume, "gp2", false)
 				Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkAWSEBS(volume, "io1", false)
 				Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -339,7 +339,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkAWSEBS(volume, "sc1", false)
 				Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -354,7 +354,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkAWSEBS(volume, "st1", false)
 				Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -369,7 +369,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkAWSEBS(volume, "gp2", true)
 				Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		// OpenStack generic tests (works on all OpenStack deployments)
@@ -381,7 +381,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			ClaimSize: "1.5Gi",
 			ExpectedSize: "2Gi",
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		{
@@ -395,7 +395,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			ClaimSize: "1.5Gi",
 			ExpectedSize: "2Gi",
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		// vSphere generic test
@@ -407,7 +407,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			ClaimSize: "1.5Gi",
 			ExpectedSize: "1.5Gi",
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 		// Azure
@@ -419,7 +419,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			ClaimSize: "1Gi",
 			ExpectedSize: "1Gi",
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		},
 	}
@@ -476,7 +476,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 				err := checkGCEPD(volume, "pd-standard")
 				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
-				testsuites.PVWriteReadCheck(c, claim, volume, testsuites.NodeSelection{})
+				testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 			},
 		}
 		class := newStorageClass(test, ns, "reclaimpolicy")
