
Commit 0dde7cf

Update test suite setup
1 parent a6420e8 commit 0dde7cf

5 files changed: +32 −28 lines


cmd/gce-pd-csi-driver/main.go

+10 −11

@@ -74,14 +74,13 @@ var (
     maxConcurrentFormat = flag.Int("max-concurrent-format", 1, "The maximum number of concurrent format exec calls")
     concurrentFormatTimeout = flag.Duration("concurrent-format-timeout", 1*time.Minute, "The maximum duration of a format operation before its concurrency token is released")

-    maxConcurrentFormatAndMount = flag.Int("max-concurrent-format-and-mount", 1, "If set then format and mount operations are serialized on each node. This is stronger than max-concurrent-format as it includes fsck and other mount operations")
-    formatAndMountTimeout = flag.Duration("format-and-mount-timeout", 1*time.Minute, "The maximum duration of a format and mount operation before another such operation will be started. Used only if --serialize-format-and-mount")
-    fallbackRequisiteZonesFlag = flag.String("fallback-requisite-zones", "", "Comma separated list of requisite zones that will be used if there are not sufficient zones present in requisite topologies when provisioning a disk")
-    enableStoragePoolsFlag = flag.Bool("enable-storage-pools", false, "If set to true, the CSI Driver will allow volumes to be provisioned in Storage Pools")
-    enableHdHAFlag = flag.Bool("allow-hdha-provisioning", false, "If set to true, will allow the driver to provision Hyperdisk-balanced High Availability disks")
-    enableControllerDataCacheFlag = flag.Bool("enable-controller-data-cache", false, "If set to true, the CSI Driver will allow volumes to be provisioned with data cache configuration")
-    enableNodeDataCacheFlag = flag.Bool("enable-node-data-cache", false, "If set to true, the CSI Driver will allow volumes to be provisioned with data cache configuration")
-    nodeName = flag.String("node-name", "", "The node this driver is running on")
+    maxConcurrentFormatAndMount = flag.Int("max-concurrent-format-and-mount", 1, "If set then format and mount operations are serialized on each node. This is stronger than max-concurrent-format as it includes fsck and other mount operations")
+    formatAndMountTimeout = flag.Duration("format-and-mount-timeout", 1*time.Minute, "The maximum duration of a format and mount operation before another such operation will be started. Used only if --serialize-format-and-mount")
+    fallbackRequisiteZonesFlag = flag.String("fallback-requisite-zones", "", "Comma separated list of requisite zones that will be used if there are not sufficient zones present in requisite topologies when provisioning a disk")
+    enableStoragePoolsFlag = flag.Bool("enable-storage-pools", false, "If set to true, the CSI Driver will allow volumes to be provisioned in Storage Pools")
+    enableHdHAFlag = flag.Bool("allow-hdha-provisioning", false, "If set to true, will allow the driver to provision Hyperdisk-balanced High Availability disks")
+    enableDataCacheFlag = flag.Bool("enable-data-cache", false, "If set to true, the CSI Driver will allow volumes to be provisioned with data cache configuration")
+    nodeName = flag.String("node-name", "", "The node this driver is running on")

     multiZoneVolumeHandleDiskTypesFlag = flag.String("multi-zone-volume-handle-disk-types", "", "Comma separated list of allowed disk types that can use the multi-zone volumeHandle. Used only if --multi-zone-volume-handle-enable")
     multiZoneVolumeHandleEnableFlag = flag.Bool("multi-zone-volume-handle-enable", false, "If set to true, the multi-zone volumeHandle feature will be enabled")

@@ -234,7 +233,7 @@ func handle() {
         }
         initialBackoffDuration := time.Duration(*errorBackoffInitialDurationMs) * time.Millisecond
         maxBackoffDuration := time.Duration(*errorBackoffMaxDurationMs) * time.Millisecond
-        controllerServer = driver.NewControllerServer(gceDriver, cloudProvider, initialBackoffDuration, maxBackoffDuration, fallbackRequisiteZones, *enableStoragePoolsFlag, *enableControllerDataCacheFlag, multiZoneVolumeHandleConfig, listVolumesConfig, provisionableDisksConfig, *enableHdHAFlag)
+        controllerServer = driver.NewControllerServer(gceDriver, cloudProvider, initialBackoffDuration, maxBackoffDuration, fallbackRequisiteZones, *enableStoragePoolsFlag, *enableDataCacheFlag, multiZoneVolumeHandleConfig, listVolumesConfig, provisionableDisksConfig, *enableHdHAFlag)
     } else if *cloudConfigFilePath != "" {
         klog.Warningf("controller service is disabled but cloud config given - it has no effect")
     }

@@ -255,15 +254,15 @@ func handle() {
         nsArgs := driver.NodeServerArgs{
             EnableDeviceInUseCheck: *enableDeviceInUseCheck,
             DeviceInUseTimeout:     *deviceInUseTimeout,
-            EnableDataCache:        *enableNodeDataCacheFlag,
+            EnableDataCache:        *enableDataCacheFlag,
         }
         nodeServer = driver.NewNodeServer(gceDriver, mounter, deviceUtils, meta, statter, nsArgs)
         if *maxConcurrentFormatAndMount > 0 {
             nodeServer = nodeServer.WithSerializedFormatAndMount(*formatAndMountTimeout, *maxConcurrentFormatAndMount)
         }
     }

-    if *enableNodeDataCacheFlag {
+    if *enableDataCacheFlag {
         if nodeName == nil || *nodeName == "" {
             klog.Errorf("Data cache enabled, but --node-name not passed")
         }
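The two data-cache flags are collapsed into a single --enable-data-cache flag that is read once and handed to both halves of the driver. A minimal, self-contained sketch of that wiring pattern follows; it is not the driver's actual code, and controllerConfig/nodeConfig are hypothetical stand-ins for the NewControllerServer arguments and NodeServerArgs shown in the hunks above.

package main

import (
    "flag"
    "fmt"
)

// Hypothetical stand-ins for the driver's controller-side and node-side configuration.
type controllerConfig struct{ EnableDataCache bool }
type nodeConfig struct{ EnableDataCache bool }

func main() {
    // One flag replaces the former --enable-controller-data-cache and --enable-node-data-cache.
    enableDataCache := flag.Bool("enable-data-cache", false,
        "If set to true, allow volumes to be provisioned with data cache configuration")
    flag.Parse()

    // The same value feeds both sides, so controller and node behavior cannot drift apart.
    cc := controllerConfig{EnableDataCache: *enableDataCache}
    nc := nodeConfig{EnableDataCache: *enableDataCache}
    fmt.Println("controller:", cc.EnableDataCache, "node:", nc.EnableDataCache)
}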

deploy/kubernetes/base/node_linux/node.yaml

+1 −1

@@ -46,7 +46,7 @@ spec:
             - "--v=5"
             - "--endpoint=unix:/csi/csi.sock"
             - "--run-controller-service=false"
-            - "--enable-node-data-cache"
+            - "--enable-data-cache"
             - "--node-name=$(KUBE_NODE_NAME)"
           securityContext:
             privileged: true

test/e2e/tests/setup_e2e_test.go

+20 −13

@@ -81,8 +81,6 @@ var _ = BeforeSuite(func() {
     defer close(hdtcc)

     zones := strings.Split(*zones, ",")
-    // Create 2 instances for each zone as we need 2 instances each zone for certain test cases
-
     rand.Seed(time.Now().UnixNano())

     computeService, err = remote.GetComputeClient()

@@ -109,27 +107,31 @@ var _ = BeforeSuite(func() {

     numberOfInstancesPerZone := 2

-    setupContext := func(zones []string, randInt int) {
-        for _, zone := range zones {
-            go func(curZone string) {
+    setupContext := func(zone string) {
+        // Create 2 instances for each zone as we need 2 instances each zone for certain test cases
+        for j := 0; j < numberOfInstancesPerZone; j++ {
+            go func(curZone string, randInt int) {
                 defer GinkgoRecover()
                 tcc <- NewDefaultTestContext(curZone, strconv.Itoa(randInt))
-            }(zone)
-            go func(curZone string) {
-                defer GinkgoRecover()
-                hdtcc <- NewTestContext(curZone, *hdMinCpuPlatform, *hdMachineType, strconv.Itoa(randInt))
-            }(zone)
+            }(zone, j)
         }
+        go func(curZone string) {
+            defer GinkgoRecover()
+            hdtcc <- NewTestContext(curZone, *hdMinCpuPlatform, *hdMachineType, "0")
+        }(zone)
     }
-    for j := 0; j < numberOfInstancesPerZone; j++ {
-        setupContext(zones, j)
+
+    for _, zone := range zones {
+        setupContext(zone)
     }

     for i := 0; i < len(zones)*numberOfInstancesPerZone; i++ {
         tc := <-tcc
         testContexts = append(testContexts, tc)
         klog.Infof("Added TestContext for node %s", tc.Instance.GetName())
-        tc = <-hdtcc
+    }
+    for i := 0; i < len(zones); i++ {
+        tc := <-hdtcc
         hyperdiskTestContexts = append(hyperdiskTestContexts, tc)
         klog.Infof("Added TestContext for node %s", tc.Instance.GetName())
     }

@@ -178,6 +180,11 @@ func NewTestContext(zone, minCpuPlatform, machineType string, instanceNumber str
         ComputeService: computeService,
         LocalSSDCount:  localSSDCount,
     }
+
+    if machineType == *hdMachineType {
+        // Machine type is defaulted to c3-standard-2 which doesn't support LSSD and we don't need LSSD for HdHA test context
+        instanceConfig.LocalSSDCount = 0
+    }
     i, err := remote.SetupInstance(instanceConfig)
     if err != nil {
         klog.Fatalf("Failed to setup instance %v: %v", nodeID, err)
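The BeforeSuite change above amounts to a per-zone fan-out feeding two channels that are then drained separately. A minimal, self-contained sketch of that pattern follows; the zone names are hypothetical, makeContext stands in for NewDefaultTestContext/NewTestContext, and the Ginkgo error handling is omitted.

package main

import "fmt"

func main() {
    // Hypothetical zones; the real suite reads them from the --zones flag.
    zones := []string{"us-central1-a", "us-central1-b"}
    numberOfInstancesPerZone := 2

    tcc := make(chan string, len(zones)*numberOfInstancesPerZone)
    hdtcc := make(chan string, len(zones))

    // makeContext is a stand-in for the suite's test-context constructors.
    makeContext := func(zone, suffix string) string { return zone + "-" + suffix }

    setupContext := func(zone string) {
        // numberOfInstancesPerZone regular instances per zone...
        for j := 0; j < numberOfInstancesPerZone; j++ {
            go func(curZone string, randInt int) {
                tcc <- makeContext(curZone, fmt.Sprint(randInt))
            }(zone, j)
        }
        // ...plus exactly one hyperdisk instance per zone.
        go func(curZone string) {
            hdtcc <- makeContext(curZone, "hyperdisk-0")
        }(zone)
    }

    for _, zone := range zones {
        setupContext(zone)
    }

    // Drain counts mirror what was launched: zones*instances regular contexts,
    // one hyperdisk context per zone.
    for i := 0; i < len(zones)*numberOfInstancesPerZone; i++ {
        fmt.Println("test context:", <-tcc)
    }
    for i := 0; i < len(zones); i++ {
        fmt.Println("hyperdisk context:", <-hdtcc)
    }
}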

test/e2e/utils/utils.go

+1 −2

@@ -71,8 +71,7 @@ func GCEClientAndDriverSetup(instance *remote.InstanceInfo, driverConfig DriverC
         "--allow-hdha-provisioning",
         "--device-in-use-timeout=10s", // Set lower than the usual value to expedite tests
         fmt.Sprintf("--fallback-requisite-zones=%s", strings.Join(driverConfig.Zones, ",")),
-        "--enable-controller-data-cache",
-        "--enable-node-data-cache",
+        "--enable-data-cache",
         fmt.Sprintf("--node-name=%s", utilcommon.TestNode),
     }
     extra_flags = append(extra_flags, fmt.Sprintf("--compute-endpoint=%s", driverConfig.ComputeEndpoint))

test/remote/instance.go

−1

@@ -148,7 +148,6 @@ func (i *InstanceInfo) CreateOrGetInstance(localSSDCount int) error {
             EnableConfidentialCompute: true,
         }
     }
-    klog.Infof("=======Adding LocalSSD %v=============", localSSDCount)

     localSSDConfig := &compute.AttachedDisk{
         Type: "SCRATCH",
