Skip to content

Commit 03d352f

Browse files
committed
e2e/storage: test usage of volume in multiple pods at once
This is a special case that both kubelet and the volume driver should support, because users might expect it. One Kubernetes mechanism to deploy pods like this is via pod affinity. However, strictly speaking the CSI spec does not allow this usage mode (see container-storage-interface/spec#150) and there is an on-going debate to enable it (see container-storage-interface/spec#178). Therefore this test gets skipped unless explicitly enabled for a driver. CSI drivers which create a block device for a remote volume in NodePublishVolume fail this test. They have to make the volume available in NodeStageVolume and then in NodePublishVolume merely do a bind mount (as for example in https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/pkg/gce-pd-csi-driver/node.go#L150).
1 parent ca42cf4 commit 03d352f

File tree

3 files changed

+57
-2
lines changed

3 files changed

+57
-2
lines changed

test/e2e/storage/drivers/csi.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
8484
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
8585
func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
8686
return initHostPathCSIDriver("csi-hostpath", config,
87-
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true},
87+
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
8888
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
8989
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
9090
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -259,7 +259,7 @@ func (m *mockCSIDriver) CleanupDriver() {
259259
// InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
260260
func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
261261
return initHostPathCSIDriver("csi-hostpath-v0", config,
262-
map[testsuites.Capability]bool{testsuites.CapPersistence: true},
262+
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
263263
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
264264
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
265265
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",

test/e2e/storage/testsuites/provisioning.go

+48
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ package testsuites
1818

1919
import (
2020
"fmt"
21+
"sync"
2122
"time"
2223

2324
. "github.com/onsi/ginkgo"
@@ -245,6 +246,50 @@ func testProvisioning(input *provisioningTestInput) {
245246
}
246247
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
247248
})
249+
250+
It("should allow concurrent writes on the same node", func() {
251+
if !input.dInfo.Capabilities[CapMultiPODs] {
252+
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
253+
}
254+
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
255+
// We start two pods concurrently on the same node,
256+
// using the same PVC. Both wait for the other to create a
257+
// file before returning. The pods are forced onto the
258+
// same node via pod affinity.
259+
wg := sync.WaitGroup{}
260+
wg.Add(2)
261+
firstPodName := "pvc-tester-first"
262+
secondPodName := "pvc-tester-second"
263+
run := func(podName, command string) {
264+
defer GinkgoRecover()
265+
defer wg.Done()
266+
node := NodeSelection{
267+
Name: input.nodeName,
268+
}
269+
if podName == secondPodName {
270+
node.Affinity = &v1.Affinity{
271+
PodAffinity: &v1.PodAffinity{
272+
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
273+
{LabelSelector: &metav1.LabelSelector{
274+
MatchLabels: map[string]string{
275+
// Set by RunInPodWithVolume.
276+
"app": firstPodName,
277+
},
278+
},
279+
TopologyKey: "kubernetes.io/hostname",
280+
},
281+
},
282+
},
283+
}
284+
}
285+
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
286+
}
287+
go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
288+
go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
289+
wg.Wait()
290+
}
291+
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
292+
})
248293
}
249294

250295
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
@@ -561,6 +606,9 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
561606
},
562607
ObjectMeta: metav1.ObjectMeta{
563608
GenerateName: podName + "-",
609+
Labels: map[string]string{
610+
"app": podName,
611+
},
564612
},
565613
Spec: v1.PodSpec{
566614
NodeName: node.Name,

test/e2e/storage/testsuites/testdriver.go

+7
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,13 @@ const (
9797
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
9898
CapExec Capability = "exec" // exec a file in the volume
9999
CapDataSource Capability = "dataSource" // support populate data from snapshot
100+
101+
// multiple pods on a node can use the same volume concurrently;
102+
// for CSI, see:
103+
// - https://github.com/container-storage-interface/spec/pull/150
104+
// - https://github.com/container-storage-interface/spec/issues/178
105+
// - NodeStageVolume in the spec
106+
CapMultiPODs Capability = "multipods"
100107
)
101108

102109
// DriverInfo represents a combination of parameters to be used in implementation of TestDriver

0 commit comments

Comments
 (0)