package integrationtests

import (
	"context"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/kubernetes-csi/csi-proxy/pkg/disk"
	diskapi "github.com/kubernetes-csi/csi-proxy/pkg/disk/api"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// This test is meant to run on GCE, where the page83 ID of the first disk
// contains the host name.
// Skipped on GitHub Actions, where it is expected to fail.
func TestDisk(t *testing.T) {
	t.Run("ListDiskIDs,ListDiskLocations", func(t *testing.T) {
		// even though this test doesn't need the VHD API, it failed on GitHub Actions with:
		// disk_v1beta3_test.go:30:
		//   Error Trace: disk_v1beta3_test.go:30
		//   Error:       Expected nil, but got: &status.statusError{state:impl.MessageState{NoUnkeyedLiterals:pragma.NoUnkeyedLiterals{}, DoNotCompare:pragma.DoNotCompare{}, DoNotCopy:pragma.DoNotCopy{}, atomicMessageInfo:(*impl.MessageInfo)(nil)}, sizeCache:0, unknownFields:[]uint8(nil), Code:2, Message:"Could not get page83 ID: IOCTL_STORAGE_QUERY_PROPERTY failed: Incorrect function.", Details:[]*anypb.Any(nil)}
		//   Test:        TestDiskAPIGroup/v1beta3Tests/ListDiskIDs,ListDiskLocations
		skipTestOnCondition(t, isRunningOnGhActions())

		client, err := disk.New(diskapi.New())
		require.Nil(t, err)

		listRequest := &disk.ListDiskIDsRequest{}
		diskIDsResponse, err := client.ListDiskIDs(context.TODO(), listRequest)
		require.Nil(t, err)

		// example output on GCE (disk 0 is the VM's disk, the others are virtual disks)
		// diskIDs:{key:0 value:{page83:"Google persistent-disk-0" serial_number:" "}}
		// diskIDs:{key:1 value:{page83:"4d53465420202020328d59b360875845ac645473be8267bf"}}
		// diskIDs:{key:2 value:{page83:"4d534654202020208956a91dadfe3d48865f9b9bcbdb8d3e"}}
		// diskIDs:{key:3 value:{page83:"4d534654202020207a3d18d72787ee47bdc127cb4f06403a"}}
		t.Logf("diskIDsResponse=%v", diskIDsResponse)

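		// on GCE the host name is part of the boot disk's page83 ID (see the comment above TestDisk)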
		cmd := "hostname"
		hostname, err := runPowershellCmd(t, cmd)
		if err != nil {
			t.Errorf("Error: %v. Command: %s. Out: %s", err, cmd, hostname)
		}

		diskIDsMap := diskIDsResponse.DiskIDs
		if len(diskIDsMap) == 0 {
			t.Errorf("Expected to get at least one disk ID, instead got diskIDsResponse.DiskIDs=%+v", diskIDsMap)
		}

		// Some disks have the Page83 field set; a GCE Persistent Disk will also have a
		// nonempty SerialNumber. The first disk is the VM's disk (the others might be VHDs).
		for diskNumber, diskIDs := range diskIDsMap {
			if len(diskIDs.SerialNumber) > 0 {
				// NVMe disks don't have a page83 ID
				if strings.HasPrefix(diskIDs.SerialNumber, "nvme") {
					continue
				}
				page83 := diskIDs.Page83
				if page83 == "" {
					t.Errorf("page83 field of diskNumber=%d should be defined, instead got diskIDs=%v", diskNumber, diskIDs)
				}
			}
		}

		listDiskLocationsRequest := &disk.ListDiskLocationsRequest{}
		listDiskLocationsResponse, err := client.ListDiskLocations(context.TODO(), listDiskLocationsRequest)
		require.Nil(t, err)
		t.Logf("listDiskLocationsResponse=%v", listDiskLocationsResponse)
		if len(listDiskLocationsResponse.DiskLocations) == 0 {
			t.Errorf("Expected to get at least one diskLocation, instead got DiskLocations=%+v", listDiskLocationsResponse.DiskLocations)
		}
	})

	t.Run("Get/SetDiskState", func(t *testing.T) {
		skipTestOnCondition(t, isRunningOnGhActions())

		client, err := disk.New(diskapi.New())
		require.Nil(t, err)

		// initialize a test VHD (diskInit also partitions it) and clean it up when the test is done
		vhd, vhdCleanup := diskInit(t)
		defer vhdCleanup()

		// the reported disk size should be close to the size the VHD was created with
		diskStatsRequest := &disk.GetDiskStatsRequest{
			DiskNumber: vhd.DiskNumber,
		}
		diskStatsResponse, err := client.GetDiskStats(context.TODO(), diskStatsRequest)
		require.NoError(t, err)
		if !sizeIsAround(t, diskStatsResponse.TotalBytes, vhd.InitialSize) {
			t.Fatalf("DiskStats doesn't have the expected size, wanted (close to)=%d got=%d", vhd.InitialSize, diskStatsResponse.TotalBytes)
		}

		// Rescan; only the absence of an error is checked here
		_, err = client.Rescan(context.TODO(), &disk.RescanRequest{})
		require.NoError(t, err)

		// take the disk offline through PowerShell, then check that GetDiskState reports it offline
		out, err := runPowershellCmd(t, fmt.Sprintf("Get-Disk -Number %d | Set-Disk -IsOffline $true", vhd.DiskNumber))
		require.NoError(t, err, "failed setting disk offline, out=%v", out)

		getReq := &disk.GetDiskStateRequest{DiskNumber: vhd.DiskNumber}
		getResp, err := client.GetDiskState(context.TODO(), getReq)

		if assert.NoError(t, err) {
			assert.False(t, getResp.IsOnline, "Expected disk to be offline")
		}

		// bring the disk back online through the API
		setReq := &disk.SetDiskStateRequest{DiskNumber: vhd.DiskNumber, IsOnline: true}
		_, err = client.SetDiskState(context.TODO(), setReq)
		assert.NoError(t, err)

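		// cross-check via PowerShell (IsOffline should now be false) and via GetDiskState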
		out, err = runPowershellCmd(t, fmt.Sprintf("Get-Disk -Number %d | Select-Object -ExpandProperty IsOffline", vhd.DiskNumber))
		assert.NoError(t, err)

		result, err := strconv.ParseBool(strings.TrimSpace(out))
		assert.NoError(t, err)
		assert.False(t, result, "Expected disk to be online")

		getReq = &disk.GetDiskStateRequest{DiskNumber: vhd.DiskNumber}
		getResp, err = client.GetDiskState(context.TODO(), getReq)

		if assert.NoError(t, err) {
			assert.True(t, getResp.IsOnline, "Expected disk to be online")
		}

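		// now set the disk offline again through the API and verify via PowerShell that IsOffline is true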
		setReq = &disk.SetDiskStateRequest{DiskNumber: vhd.DiskNumber, IsOnline: false}
		_, err = client.SetDiskState(context.TODO(), setReq)
		assert.NoError(t, err)

		out, err = runPowershellCmd(t, fmt.Sprintf("Get-Disk -Number %d | Select-Object -ExpandProperty IsOffline", vhd.DiskNumber))
		assert.NoError(t, err)

		result, err = strconv.ParseBool(strings.TrimSpace(out))
		assert.NoError(t, err)
		assert.True(t, result, "Expected disk to be offline")
	})

	t.Run("PartitionDisk", func(t *testing.T) {
		skipTestOnCondition(t, isRunningOnGhActions())

		var err error
		client, err := disk.New(diskapi.New())
		require.Nil(t, err)

		// set up the disk by hand rather than through the diskInit helper, which would also partition it
		s1 := rand.NewSource(time.Now().UTC().UnixNano())
		r1 := rand.New(s1)

		testPluginPath := fmt.Sprintf("C:\\var\\lib\\kubelet\\plugins\\testplugin-%d.csi.io\\", r1.Intn(100))
		mountPath := fmt.Sprintf("%smount-%d", testPluginPath, r1.Intn(100))
		vhdxPath := fmt.Sprintf("%sdisk-%d.vhdx", testPluginPath, r1.Intn(100))

		var cmd, out string
		const initialSize = 1 * 1024 * 1024 * 1024

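		// create the mount directory and a VHDX, then mount it so it shows up as a raw, unpartitioned disk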
		cmd = fmt.Sprintf("mkdir %s", mountPath)
		if out, err = runPowershellCmd(t, cmd); err != nil {
			t.Fatalf("Error: %v. Command: %q. Out: %s", err, cmd, out)
		}
		cmd = fmt.Sprintf("New-VHD -Path %s -SizeBytes %d", vhdxPath, initialSize)
		if out, err = runPowershellCmd(t, cmd); err != nil {
			t.Fatalf("Error: %v. Command: %q. Out: %s", err, cmd, out)
		}
		cmd = fmt.Sprintf("Mount-VHD -Path %s", vhdxPath)
		if out, err = runPowershellCmd(t, cmd); err != nil {
			t.Fatalf("Error: %v. Command: %q. Out: %s", err, cmd, out)
		}

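		// find the disk number Windows assigned to the newly mounted VHD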
		var diskNum uint64
		var diskNumUnparsed string
		cmd = fmt.Sprintf("(Get-VHD -Path %s).DiskNumber", vhdxPath)
		if diskNumUnparsed, err = runPowershellCmd(t, cmd); err != nil {
			t.Fatalf("Error: %v. Command: %s", err, cmd)
		}
		if diskNum, err = strconv.ParseUint(strings.TrimRight(diskNumUnparsed, "\r\n"), 10, 32); err != nil {
			t.Fatalf("Error: %v", err)
		}

		// make the disk partition request; it should succeed on the raw disk
		diskPartitionRequest := &disk.PartitionDiskRequest{
			DiskNumber: uint32(diskNum),
		}
		_, err = client.PartitionDisk(context.TODO(), diskPartitionRequest)
		require.NoError(t, err)
	})
}

// This test is meant to run on GCE, where the page83 ID of the first disk
// contains the host name.
// Skipped on GitHub Actions, where it is expected to fail.