@@ -182,7 +182,7 @@ type provisioningTestInput struct {
func testProvisioning(input *provisioningTestInput) {
	// common checker for most of the test cases below
	pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-		PVWriteReadCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+		PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
	}

	It("should provision storage with defaults", func() {
@@ -200,6 +200,25 @@ func testProvisioning(input *provisioningTestInput) {
		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
	})

+	It("should access volume from different nodes", func() {
+		// The assumption is that if the test hasn't been
+		// locked onto a single node, then the driver is
+		// usable on all of them *and* supports accessing a volume
+		// from any node.
+		if input.nodeName != "" {
+			framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
+		}
+		// Ensure that we actually have more than one node.
+		nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
+		if len(nodes.Items) <= 1 {
+			framework.Skipf("need more than one node - skipping")
+		}
+		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+		}
+		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+	})
+
	It("should create and delete block persistent volumes", func() {
		if !input.dInfo.Capabilities[CapBlock] {
			framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
@@ -317,16 +336,20 @@ func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, cla
	return pv
}

-// PVWriteReadCheck checks that a PV retains data.
+// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
//
// It starts two pods:
-// - The first writes 'hello word' to the /mnt/test (= the volume).
-// - The second one runs grep 'hello world' on /mnt/test.
+// - The first pod writes 'hello world' to the /mnt/test (= the volume) on one node.
+// - The second pod runs grep 'hello world' on /mnt/test on the same node.
+//
+// The node is selected by Kubernetes when scheduling the first
+// pod. It's then selected via its name for the second pod.
+//
// If both succeed, Kubernetes actually allocated something that is
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
	command := "echo 'hello world' > /mnt/test/data"
	// We give the first pod the secondary responsibility of checking the volume has
@@ -336,11 +359,91 @@ func PVWriteReadCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
	}
	command += " || (mount | grep 'on /mnt/test'; false)"
-	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
+	pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
+	defer func() {
+		// pod might be nil now.
+		StopPod(client, pod)
+	}()
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred(), "get pod")
+	actualNodeName := runningPod.Spec.NodeName
+	StopPod(client, pod)
+	pod = nil // Don't stop twice.
+
+	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
+	command = "grep 'hello world' /mnt/test/data"
+	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, NodeSelection{Name: actualNodeName})
+}
+
+// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
+//
+// It starts these pods:
+// - The first pod writes 'hello world' to the /mnt/test (= the volume) on one node.
+// - The second pod runs grep 'hello world' on /mnt/test on another node.
+//
+// The first node is selected by Kubernetes when scheduling the first
+// pod. The second pod uses the same criteria, except that a special
+// anti-affinity for the first node gets added. This test can only pass
+// if the cluster has more than one suitable node. The caller has to
+// ensure that.
+//
+// If all succeeds, Kubernetes actually allocated something that is
+// persistent across pods and across nodes.
+//
+// This is a common test that can be called from a StorageClassTest.PvCheck.
+func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node NodeSelection) {
+	Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
+
+	var pod *v1.Pod
+	defer func() {
+		// passing pod = nil is okay.
+		StopPod(client, pod)
+	}()
+
+	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+	command := "echo 'hello world' > /mnt/test/data"
+	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred(), "get pod")
+	actualNodeName := runningPod.Spec.NodeName
+	StopPod(client, pod)
+	pod = nil // Don't stop twice.
+
+	// Add node-anti-affinity.
+	secondNode := node
+	if secondNode.Affinity == nil {
+		secondNode.Affinity = &v1.Affinity{}
+	}
+	if secondNode.Affinity.NodeAffinity == nil {
+		secondNode.Affinity.NodeAffinity = &v1.NodeAffinity{}
+	}
+	if secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+		secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
+	}
+	secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(secondNode.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
+		v1.NodeSelectorTerm{
+			// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity warns
+			// that "the value of kubernetes.io/hostname may be the same as the Node name in some environments and a different value in other environments".
+			// So this might be cleaner:
+			//    MatchFields: []v1.NodeSelectorRequirement{
+			//    	{Key: "name", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
+			//    },
+			// However, "name", "Name", "ObjectMeta.Name" all got rejected with "not a valid field selector key".
+
+			MatchExpressions: []v1.NodeSelectorRequirement{
+				{Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpNotIn, Values: []string{actualNodeName}},
+			},
+		})

-	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %+v", node))
+	By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
	command = "grep 'hello world' /mnt/test/data"
-	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, node)
+	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
+	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred(), "get pod")
+	Expect(runningPod.Spec.NodeName).NotTo(Equal(actualNodeName), "second pod should have run on a different node")
+	StopPod(client, pod)
+	pod = nil
}

func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
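
Note: for driver suites outside this file, the wiring pattern for the new helper is the same as in the "should access volume from different nodes" block above. A minimal sketch, assuming a StorageClassTest value named testCase plus cs, pvc and sc already set up by the caller (those names are illustrative, not part of this diff):

// Sketch: opt a StorageClassTest into the cross-node check.
// PVMultiNodeCheck, NodeSelection and TestDynamicProvisioning are the
// helpers from this file; testCase, cs, pvc and sc are assumed inputs.
testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
	// Leave NodeSelection.Name empty: the scheduler picks the writer's
	// node, and PVMultiNodeCheck then schedules the reader away from it.
	PVMultiNodeCheck(cs, claim, volume, NodeSelection{})
}
TestDynamicProvisioning(testCase, cs, pvc, sc)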
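
Note on the kubernetes.io/hostname caveat in the inline comment: the NotIn term only excludes the first node if that node's hostname label equals its Node name. A hedged sketch of a helper that would log clusters where this assumption does not hold, reusing the framework helpers already used in this file (the helper itself is hypothetical and not part of this change):

// logHostnameLabelMismatches is a hypothetical helper, not part of this
// commit. It flags nodes whose kubernetes.io/hostname label differs from
// the Node name, which is the assumption behind the NotIn term above.
func logHostnameLabelMismatches(client clientset.Interface) {
	nodes := framework.GetReadySchedulableNodesOrDie(client)
	for _, n := range nodes.Items {
		if n.Labels["kubernetes.io/hostname"] != n.Name {
			framework.Logf("node %q: kubernetes.io/hostname=%q, anti-affinity by hostname may not exclude it",
				n.Name, n.Labels["kubernetes.io/hostname"])
		}
	}
}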