@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"math"
 	"os"
-	"path/filepath"
 	"sort"
 	"strconv"
@@ -35,17 +34,12 @@ import (
 
 	"github.com/container-storage-interface/spec/lib/go/csi"
 	utilexec "k8s.io/utils/exec"
-)
 
-const (
-	deviceID = "deviceID"
+	"github.com/kubernetes-csi/csi-driver-host-path/pkg/state"
 )
 
-type accessType int
-
 const (
-	mountAccess accessType = iota
-	blockAccess
+	deviceID = "deviceID"
 )
 
 func (hp *hostPath) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (resp *csi.CreateVolumeResponse, finalErr error) {
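The hunks below only show call sites into the new pkg/state package. As a reading aid, here is a minimal sketch of the surface those calls appear to rely on; field sets, exact signatures, and any persistence behaviour beyond what the hunks show are assumptions, not the package's actual definition.

// Sketch only: inferred from the call sites in this diff.
package state

type AccessType int

const (
	MountAccess AccessType = iota
	BlockAccess
)

// Volume and Snapshot carry at least the fields referenced in the hunks
// below; anything else is omitted here.
type Volume struct {
	VolID          string
	VolName        string
	VolSize        int64
	VolPath        string
	VolAccessType  AccessType
	IsAttached     bool
	ReadOnlyAttach bool
}

type Snapshot struct {
	Id         string
	Name       string
	VolID      string
	SizeBytes  int64
	ReadyToUse bool
}

// Accessors the controller code calls in the hunks below.
type State interface {
	GetVolumeByID(volID string) (Volume, error)
	GetVolumeByName(volName string) (Volume, error)
	GetVolumes() []Volume
	UpdateVolume(volume Volume) error

	GetSnapshotByID(snapshotID string) (Snapshot, error)
	GetSnapshotByName(name string) (Snapshot, error)
	GetSnapshots() []Snapshot
	UpdateSnapshot(snapshot Snapshot) error
	DeleteSnapshot(snapshotID string) error
}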
@@ -84,13 +78,13 @@ func (hp *hostPath) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (resp *csi.CreateVolumeResponse, finalErr error) {
 		return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
 	}
 
-	var requestedAccessType accessType
+	var requestedAccessType state.AccessType
 
 	if accessTypeBlock {
-		requestedAccessType = blockAccess
+		requestedAccessType = state.BlockAccess
 	} else {
 		// Default to mount.
-		requestedAccessType = mountAccess
+		requestedAccessType = state.MountAccess
 	}
 
 	// Lock before acting on global state. A production-quality
@@ -106,7 +100,7 @@ func (hp *hostPath) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (resp *csi.CreateVolumeResponse, finalErr error) {
 
 	// Need to check for already existing volume name, and if found
 	// check for the requested capacity and already allocated capacity
-	if exVol, err := hp.getVolumeByName(req.GetName()); err == nil {
+	if exVol, err := hp.state.GetVolumeByName(req.GetName()); err == nil {
 		// Since err is nil, it means the volume with the same name already exists
 		// need to check if the size of existing volume is the same as in new
 		// request
@@ -149,7 +143,7 @@ func (hp *hostPath) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (resp *csi.CreateVolumeResponse, finalErr error) {
 	glog.V(4).Infof("created volume %s at path %s", vol.VolID, vol.VolPath)
 
 	if req.GetVolumeContentSource() != nil {
-		path := getVolumePath(volumeID)
+		path := hp.getVolumePath(volumeID)
 		volumeSource := req.VolumeContentSource
 		switch volumeSource.Type.(type) {
 		case *csi.VolumeContentSource_Snapshot:
@@ -203,7 +197,7 @@ func (hp *hostPath) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
 	defer hp.mutex.Unlock()
 
 	volId := req.GetVolumeId()
-	vol, err := hp.getVolumeByID(volId)
+	vol, err := hp.state.GetVolumeByID(volId)
 	if err != nil {
 		// Volume not found: might have already deleted
 		return &csi.DeleteVolumeResponse{}, nil
@@ -243,7 +237,7 @@ func (hp *hostPath) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
 	hp.mutex.Lock()
 	defer hp.mutex.Unlock()
 
-	if _, err := hp.getVolumeByID(req.GetVolumeId()); err != nil {
+	if _, err := hp.state.GetVolumeByID(req.GetVolumeId()); err != nil {
 		return nil, err
 	}
@@ -287,7 +281,7 @@ func (hp *hostPath) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
 	hp.mutex.Lock()
 	defer hp.mutex.Unlock()
 
-	vol, err := hp.getVolumeByID(req.VolumeId)
+	vol, err := hp.state.GetVolumeByID(req.VolumeId)
 	if err != nil {
 		return nil, status.Error(codes.NotFound, err.Error())
 	}
@@ -311,8 +305,8 @@ func (hp *hostPath) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
 
 	vol.IsAttached = true
 	vol.ReadOnlyAttach = req.GetReadonly()
-	if err := hp.updateVolume(vol.VolID, vol); err != nil {
-		return nil, status.Errorf(codes.Internal, "failed to update volume %s: %v", vol.VolID, err)
+	if err := hp.state.UpdateVolume(vol); err != nil {
+		return nil, err
 	}
 
 	return &csi.ControllerPublishVolumeResponse{
@@ -337,7 +331,7 @@ func (hp *hostPath) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
 	hp.mutex.Lock()
 	defer hp.mutex.Unlock()
 
-	vol, err := hp.getVolumeByID(req.VolumeId)
+	vol, err := hp.state.GetVolumeByID(req.VolumeId)
 	if err != nil {
 		// Not an error: a non-existent volume is not published.
 		// See also https://github.com/kubernetes-csi/external-attacher/pull/165
@@ -351,7 +345,7 @@ func (hp *hostPath) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
 	}
 
 	vol.IsAttached = false
-	if err := hp.updateVolume(vol.VolID, vol); err != nil {
+	if err := hp.state.UpdateVolume(vol); err != nil {
 		return nil, status.Errorf(codes.Internal, "could not update volume %s: %v", vol.VolID, err)
 	}
@@ -399,15 +393,20 @@ func (hp *hostPath) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
 
 	var (
 		startIdx, volumesLength, maxLength int64
-		hpVolume                           hostPathVolume
+		hpVolume                           state.Volume
 	)
 
 	// Lock before acting on global state. A production-quality
 	// driver might use more fine-grained locking.
 	hp.mutex.Lock()
 	defer hp.mutex.Unlock()
 
-	volumeIds := hp.getSortedVolumeIDs()
+	// Sort by volume ID.
+	volumes := hp.state.GetVolumes()
+	sort.Slice(volumes, func(i, j int) bool {
+		return volumes[i].VolID < volumes[j].VolID
+	})
+
 	if req.StartingToken == "" {
 		req.StartingToken = "1"
 	}
@@ -417,16 +416,16 @@
 		return nil, status.Error(codes.Aborted, "The type of startingToken should be integer")
 	}
 
-	volumesLength = int64(len(volumeIds))
+	volumesLength = int64(len(volumes))
 	maxLength = int64(req.MaxEntries)
 
 	if maxLength > volumesLength || maxLength <= 0 {
 		maxLength = volumesLength
 	}
 
 	for index := startIdx - 1; index < volumesLength && index < maxLength; index++ {
-		hpVolume = hp.volumes[volumeIds[index]]
-		healthy, msg := hp.doHealthCheckInControllerSide(volumeIds[index])
+		hpVolume = volumes[index]
+		healthy, msg := hp.doHealthCheckInControllerSide(hpVolume.VolID)
 		glog.V(3).Infof("Healthy state: %s Volume: %t", hpVolume.VolName, healthy)
 		volumeRes.Entries = append(volumeRes.Entries, &csi.ListVolumesResponse_Entry{
 			Volume: &csi.Volume{
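The pagination arithmetic itself is unchanged by this hunk; the sketch below replays the same logic (sort by VolID, then walk a window bounded by the 1-based starting token and MaxEntries) on hypothetical data, purely to illustrate the index bounds.

package main

import (
	"fmt"
	"sort"
	"strconv"
)

// Hypothetical stand-in for the entries returned by hp.state.GetVolumes().
type volume struct{ VolID string }

func main() {
	volumes := []volume{{"vol-3"}, {"vol-1"}, {"vol-2"}}

	// Deterministic order, as ListVolumes now does with sort.Slice.
	sort.Slice(volumes, func(i, j int) bool { return volumes[i].VolID < volumes[j].VolID })

	startingToken := "2" // 1-based index of the first entry to return
	maxEntries := 0      // <= 0 means "no limit", mirroring req.MaxEntries

	startIdx, err := strconv.ParseInt(startingToken, 10, 32)
	if err != nil {
		panic("the starting token must be an integer")
	}

	volumesLength := int64(len(volumes))
	maxLength := int64(maxEntries)
	if maxLength > volumesLength || maxLength <= 0 {
		maxLength = volumesLength
	}

	// Same loop bounds as the hunk above: maxLength caps the absolute index.
	for index := startIdx - 1; index < volumesLength && index < maxLength; index++ {
		fmt.Println(volumes[index].VolID) // prints vol-2, then vol-3
	}
}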
@@ -453,7 +452,7 @@ func (hp *hostPath) ControllerGetVolume(ctx context.Context, req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
 	hp.mutex.Lock()
 	defer hp.mutex.Unlock()
 
-	volume, err := hp.getVolumeByID(req.GetVolumeId())
+	volume, err := hp.state.GetVolumeByID(req.GetVolumeId())
 	if err != nil {
 		return nil, err
 	}
@@ -475,11 +474,6 @@ func (hp *hostPath) ControllerGetVolume(ctx context.Context, req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
 	}, nil
 }
 
-// getSnapshotPath returns the full path to where the snapshot is stored
-func getSnapshotPath(snapshotID string) string {
-	return filepath.Join(dataRoot, fmt.Sprintf("%s%s", snapshotID, snapshotExt))
-}
-
 // CreateSnapshot uses tar command to create snapshot for hostpath volume. The tar command can quickly create
 // archives of entire directories. The host image must have "tar" binaries in /bin, /usr/sbin, or /usr/bin.
 func (hp *hostPath) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
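getSnapshotPath disappears from this file as a package-level function (and path/filepath from the imports), yet the later hunks call hp.getSnapshotPath. The method itself is defined outside this diff; a plausible shape, mirroring the removed function but resolving the data directory from the driver instance, might look like the sketch below. The hp.config.StateDir field name is an assumption.

// Hypothetical sketch of the per-instance helper the later hunks call.
// The removed free function joined the package-level dataRoot with
// "<snapshotID><snapshotExt>"; this variant assumes the directory now
// lives on the driver struct (the field name is made up here).
func (hp *hostPath) getSnapshotPath(snapshotID string) string {
	return filepath.Join(hp.config.StateDir, fmt.Sprintf("%s%s", snapshotID, snapshotExt))
}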
@@ -503,7 +497,7 @@ func (hp *hostPath) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
 
 	// Need to check for already existing snapshot name, and if found check for the
 	// requested sourceVolumeId and sourceVolumeId of snapshot that has been created.
-	if exSnap, err := hp.getSnapshotByName(req.GetName()); err == nil {
+	if exSnap, err := hp.state.GetSnapshotByName(req.GetName()); err == nil {
 		// Since err is nil, it means the snapshot with the same name already exists need
 		// to check if the sourceVolumeId of existing snapshot is the same as in new request.
 		if exSnap.VolID == req.GetSourceVolumeId() {
@@ -522,18 +516,18 @@ func (hp *hostPath) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
 	}
 
 	volumeID := req.GetSourceVolumeId()
-	hostPathVolume, err := hp.getVolumeByID(volumeID)
+	hostPathVolume, err := hp.state.GetVolumeByID(volumeID)
 	if err != nil {
 		return nil, err
 	}
 
 	snapshotID := uuid.NewUUID().String()
 	creationTime := ptypes.TimestampNow()
 	volPath := hostPathVolume.VolPath
-	file := getSnapshotPath(snapshotID)
+	file := hp.getSnapshotPath(snapshotID)
 
 	var cmd []string
-	if hostPathVolume.VolAccessType == blockAccess {
+	if hostPathVolume.VolAccessType == state.BlockAccess {
 		glog.V(4).Infof("Creating snapshot of Raw Block Mode Volume")
 		cmd = []string{"cp", volPath, file}
 	} else {
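Only the command-slice assembly is visible in this hunk; the execution step sits outside the diff. Below is a self-contained sketch of running such a slice with the k8s.io/utils/exec package that this file already imports; the paths and error handling are placeholders, not the driver's actual code.

package main

import (
	"fmt"

	utilexec "k8s.io/utils/exec"
)

func main() {
	// Hypothetical slice in the shape CreateSnapshot builds for the
	// raw-block case: {"cp", volPath, file}. The paths are placeholders.
	cmd := []string{"cp", "/csi-data-dir/vol-1", "/csi-data-dir/snap-1.snap"}

	executor := utilexec.New()
	out, err := executor.Command(cmd[0], cmd[1:]...).CombinedOutput()
	if err != nil {
		fmt.Printf("failed to create snapshot: %v: %q\n", err, string(out))
		return
	}
	fmt.Println("snapshot file written:", cmd[2])
}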
@@ -547,7 +541,7 @@ func (hp *hostPath) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
 	}
 
 	glog.V(4).Infof("create volume snapshot %s", file)
-	snapshot := hostPathSnapshot{}
+	snapshot := state.Snapshot{}
 	snapshot.Name = req.GetName()
 	snapshot.Id = snapshotID
 	snapshot.VolID = volumeID
@@ -556,8 +550,9 @@
 	snapshot.SizeBytes = hostPathVolume.VolSize
 	snapshot.ReadyToUse = true
 
-	hp.snapshots[snapshotID] = snapshot
-
+	if err := hp.state.UpdateSnapshot(snapshot); err != nil {
+		return nil, err
+	}
 	return &csi.CreateSnapshotResponse{
 		Snapshot: &csi.Snapshot{
 			SnapshotId: snapshot.Id,
@@ -587,9 +582,11 @@ func (hp *hostPath) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
 	defer hp.mutex.Unlock()
 
 	glog.V(4).Infof("deleting snapshot %s", snapshotID)
-	path := getSnapshotPath(snapshotID)
+	path := hp.getSnapshotPath(snapshotID)
 	os.RemoveAll(path)
-	delete(hp.snapshots, snapshotID)
+	if err := hp.state.DeleteSnapshot(snapshotID); err != nil {
+		return nil, err
+	}
 	return &csi.DeleteSnapshotResponse{}, nil
 }
@@ -607,14 +604,14 @@ func (hp *hostPath) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
 	// case 1: SnapshotId is not empty, return snapshots that match the snapshot id.
 	if len(req.GetSnapshotId()) != 0 {
 		snapshotID := req.SnapshotId
-		if snapshot, ok := hp.snapshots[snapshotID]; ok {
+		if snapshot, err := hp.state.GetSnapshotByID(snapshotID); err == nil {
 			return convertSnapshot(snapshot), nil
 		}
 	}
 
 	// case 2: SourceVolumeId is not empty, return snapshots that match the source volume id.
 	if len(req.GetSourceVolumeId()) != 0 {
-		for _, snapshot := range hp.snapshots {
+		for _, snapshot := range hp.state.GetSnapshots() {
 			if snapshot.VolID == req.SourceVolumeId {
 				return convertSnapshot(snapshot), nil
 			}
@@ -623,14 +620,12 @@
 
 	var snapshots []csi.Snapshot
 	// case 3: no parameter is set, so we return all the snapshots.
-	sortedKeys := make([]string, 0)
-	for k := range hp.snapshots {
-		sortedKeys = append(sortedKeys, k)
-	}
-	sort.Strings(sortedKeys)
+	hpSnapshots := hp.state.GetSnapshots()
+	sort.Slice(hpSnapshots, func(i, j int) bool {
+		return hpSnapshots[i].Id < hpSnapshots[j].Id
+	})
 
-	for _, key := range sortedKeys {
-		snap := hp.snapshots[key]
+	for _, snap := range hpSnapshots {
 		snapshot := csi.Snapshot{
 			SnapshotId:     snap.Id,
 			SourceVolumeId: snap.VolID,
@@ -725,15 +720,15 @@ func (hp *hostPath) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
 	hp.mutex.Lock()
 	defer hp.mutex.Unlock()
 
-	exVol, err := hp.getVolumeByID(volID)
+	exVol, err := hp.state.GetVolumeByID(volID)
 	if err != nil {
 		return nil, err
 	}
 
 	if exVol.VolSize < capacity {
 		exVol.VolSize = capacity
-		if err := hp.updateVolume(volID, exVol); err != nil {
-			return nil, fmt.Errorf("could not update volume %s: %w", volID, err)
+		if err := hp.state.UpdateVolume(exVol); err != nil {
+			return nil, err
 		}
 	}
@@ -743,7 +738,7 @@
 	}, nil
 }
 
-func convertSnapshot(snap hostPathSnapshot) *csi.ListSnapshotsResponse {
+func convertSnapshot(snap state.Snapshot) *csi.ListSnapshotsResponse {
 	entries := []*csi.ListSnapshotsResponse_Entry{
 		{
 			Snapshot: &csi.Snapshot{