@@ -123,10 +123,10 @@ func makePodWithPIDStats(name string, priority int32, processCount uint64) (*v1.
 	return pod, podStats
 }
 
-func makePodWithDiskStats(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*v1.Pod, statsapi.PodStats) {
+func makePodWithDiskStats(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string, volumes []v1.Volume) (*v1.Pod, statsapi.PodStats) {
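+	// volumes, when non-nil, are attached to the returned pod; the new
+	// TestStorageLimitEvictions below uses this to mount an emptyDir volume with a sizeLimit.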
 	pod := newPod(name, priority, []v1.Container{
 		newContainer(name, requests, limits),
-	}, nil)
+	}, volumes)
 	podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
 	return pod, podStats
 }
@@ -505,7 +505,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 					Quantity: quantityMustParse("2Gi"),
 				},
 			},
-			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 2Gi, available: 1536Mi. ",
+			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 2Gi, available: 1536Mi. Container above-requests was using 700Mi, request is 100Mi, has larger consumption of ephemeral-storage. ",
 			podToMakes: []podToMake{
 				{name: "below-requests", requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "1Gi"), rootFsUsed: "900Mi"},
 				{name: "above-requests", requests: newResourceList("", "", "100Mi"), limits: newResourceList("", "", "1Gi"), rootFsUsed: "700Mi"},
@@ -516,7 +516,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			nodeFsStats: "1Gi",
 			imageFsStats: "10Gi",
 			containerFsStats: "10Gi",
-			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. ",
+			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. ",
 			thresholdToMonitor: evictionapi.Threshold{
 				Signal: evictionapi.SignalImageFsAvailable,
 				Operator: evictionapi.OpLessThan,
@@ -537,7 +537,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			nodeFsStats: "1Gi",
 			imageFsStats: "100Gi",
 			containerFsStats: "10Gi",
-			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. ",
+			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi.Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. ",
 			thresholdToMonitor: evictionapi.Threshold{
 				Signal: evictionapi.SignalContainerFsAvailable,
 				Operator: evictionapi.OpLessThan,
@@ -557,7 +557,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			nodeFsStats: "10Gi",
 			imageFsStats: "100Gi",
 			containerFsStats: "10Gi",
-			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. ",
+			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. ",
 			thresholdToMonitor: evictionapi.Threshold{
 				Signal: evictionapi.SignalNodeFsAvailable,
 				Operator: evictionapi.OpLessThan,
@@ -588,7 +588,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 	pods := []*v1.Pod{}
 	podStats := map[*v1.Pod]statsapi.PodStats{}
 	for _, podToMake := range podsToMake {
-		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
+		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 		pods = append(pods, pod)
 		podStats[pod] = podStat
 	}
@@ -835,8 +835,8 @@ func TestMemoryPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// the best-effort pod should not admit, burstable should
@@ -1106,8 +1106,8 @@ func TestPIDPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod but should have had a grace period override.")
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// try to admit our pod (should fail)
@@ -1336,7 +1336,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	pods := []*v1.Pod{}
 	podStats := map[*v1.Pod]statsapi.PodStats{}
 	for _, podToMake := range podsToMake {
-		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
+		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 		pods = append(pods, pod)
 		podStats[pod] = podStat
 	}
@@ -1379,7 +1379,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	}
 
 	// create a best effort pod to test admission
-	podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, newResourceList("", "", ""), newResourceList("", "", ""), "0Gi", "0Gi", "0Gi")
+	podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, newResourceList("", "", ""), newResourceList("", "", ""), "0Gi", "0Gi", "0Gi", nil)
 
 	// synchronize
 	_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
@@ -1494,8 +1494,8 @@ func TestDiskPressureNodeFs(t *testing.T) {
 		t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// try to admit our pod (should fail)
@@ -1644,8 +1644,8 @@ func TestMinReclaim(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod := *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// reduce memory pressure, but not below the min-reclaim amount
@@ -1668,8 +1668,8 @@ func TestMinReclaim(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// reduce memory pressure and ensure the min-reclaim amount
@@ -1858,7 +1858,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	pods := []*v1.Pod{}
 	podStats := map[*v1.Pod]statsapi.PodStats{}
 	for _, podToMake := range podsToMake {
-		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
+		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 		pods = append(pods, pod)
 		podStats[pod] = podStat
 	}
@@ -2060,8 +2060,8 @@ func TestNodeReclaimFuncs(t *testing.T) {
 		t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod := *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// reduce disk pressure
@@ -2458,8 +2458,8 @@ func TestInodePressureFsInodes(t *testing.T) {
 		t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// try to admit our pod (should fail)
@@ -2666,6 +2666,111 @@ func TestStaticCriticalPodsAreNotEvicted(t *testing.T) {
 	}
 }
 
+func TestStorageLimitEvictions(t *testing.T) {
+	volumeSizeLimit := resource.MustParse("1Gi")
+
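+	// Each case defines a single pod whose reported disk usage (2Gi) exceeds its
+	// 1Gi ephemeral-storage limit or emptyDir sizeLimit, so the eviction manager
+	// is expected to evict that pod for violating its local storage limits.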
+	testCases := map[string]struct {
+		pod     podToMake
+		volumes []v1.Volume
+	}{
+		"eviction due to rootfs above limit": {
+			pod: podToMake{name: "rootfs-above-limits", priority: defaultPriority, requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "1Gi"), rootFsUsed: "2Gi"},
+		},
+		"eviction due to logsfs above limit": {
+			pod: podToMake{name: "logsfs-above-limits", priority: defaultPriority, requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "1Gi"), logsFsUsed: "2Gi"},
+		},
+		"eviction due to local volume above limit": {
+			pod: podToMake{name: "localvolume-above-limits", priority: defaultPriority, requests: newResourceList("", "", ""), limits: newResourceList("", "", ""), perLocalVolumeUsed: "2Gi"},
+			volumes: []v1.Volume{{
+				Name: "emptyDirVolume",
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{
+						SizeLimit: &volumeSizeLimit,
+					},
+				},
+			}},
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			podMaker := makePodWithDiskStats
+			summaryStatsMaker := makeDiskStats
+			podsToMake := []podToMake{
+				tc.pod,
+			}
+			pods := []*v1.Pod{}
+			podStats := map[*v1.Pod]statsapi.PodStats{}
+			for _, podToMake := range podsToMake {
+				pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, tc.volumes)
+				pods = append(pods, pod)
+				podStats[pod] = podStat
+			}
+
+			podToEvict := pods[0]
+			activePodsFunc := func() []*v1.Pod {
+				return pods
+			}
+
+			fakeClock := testingclock.NewFakeClock(time.Now())
+			podKiller := &mockPodKiller{}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
+			diskGC := &mockDiskGC{err: nil}
+			nodeRef := &v1.ObjectReference{
+				Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: "",
+			}
+
+			config := Config{
+				MaxPodGracePeriodSeconds: 5,
+				PressureTransitionPeriod: time.Minute * 5,
+				Thresholds: []evictionapi.Threshold{
+					{
+						Signal: evictionapi.SignalNodeFsAvailable,
+						Operator: evictionapi.OpLessThan,
+						Value: evictionapi.ThresholdValue{
+							Quantity: quantityMustParse("1Gi"),
+						},
+					},
+				},
+			}
+
+			diskStat := diskStats{
+				rootFsAvailableBytes: "200Mi",
+				imageFsAvailableBytes: "200Mi",
+				podStats: podStats,
+			}
+			summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(diskStat)}
+			manager := &managerImpl{
+				clock: fakeClock,
+				killPodFunc: podKiller.killPodNow,
+				imageGC: diskGC,
+				containerGC: diskGC,
+				config: config,
+				recorder: &record.FakeRecorder{},
+				summaryProvider: summaryProvider,
+				nodeRef: nodeRef,
+				nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
+				thresholdsFirstObservedAt: thresholdsObservedAt{},
+				localStorageCapacityIsolation: true,
+			}
+
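+			// synchronize is expected to evict the pod, which has exceeded its local
+			// storage limit (localStorageCapacityIsolation is enabled on the manager above).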
2756
+ _ , err := manager .synchronize (diskInfoProvider , activePodsFunc )
2757
+ if err != nil {
2758
+ t .Fatalf ("Manager expects no error but got %v" , err )
2759
+ }
2760
+
2761
+ if podKiller .pod == nil {
2762
+ t .Fatalf ("Manager should have selected a pod for eviction" )
2763
+ }
2764
+ if podKiller .pod != podToEvict {
2765
+ t .Errorf ("Manager should have killed pod: %v, but instead killed: %v" , podToEvict .Name , podKiller .pod .Name )
2766
+ }
2767
+ if * podKiller .gracePeriodOverride != 1 {
2768
+ t .Errorf ("Manager should have evicted with gracePeriodOverride of 1, but used: %v" , * podKiller .gracePeriodOverride )
2769
+ }
2770
+ })
2771
+ }
2772
+ }
2773
+
2669
2774
// TestAllocatableMemoryPressure
2670
2775
func TestAllocatableMemoryPressure (t * testing.T ) {
2671
2776
podMaker := makePodWithMemoryStats
@@ -2767,8 +2872,8 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod := *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 	// reset state
 	podKiller.pod = nil