
Commit f868289

Merge pull request #2164 from openshift-kannon92/OCPBUGS-17373-release-417
[release-4.17]: OCPBUGS-46364: Fix grace period used for immediate evictions
2 parents: 13aad92 + c0eea14

File tree: 5 files changed, +204 -32 lines

Diff for: pkg/kubelet/eviction/eviction_manager.go (+7 -4)

@@ -56,6 +56,9 @@ const (
 	signalEphemeralPodFsLimit string = "ephemeralpodfs.limit"
 	// signalEmptyDirFsLimit is amount of storage available on filesystem requested by an emptyDir
 	signalEmptyDirFsLimit string = "emptydirfs.limit"
+	// immediateEvictionGracePeriodSeconds is how long we give pods to shut down when we
+	// need them to evict quickly due to resource pressure
+	immediateEvictionGracePeriodSeconds = 1
 )
 
 // managerImpl implements Manager
@@ -405,7 +408,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
 	// we kill at most a single pod during each eviction interval
 	for i := range activePods {
 		pod := activePods[i]
-		gracePeriodOverride := int64(0)
+		gracePeriodOverride := int64(immediateEvictionGracePeriodSeconds)
 		if !isHardEvictionThreshold(thresholdToReclaim) {
 			gracePeriodOverride = m.config.MaxPodGracePeriodSeconds
 		}
@@ -525,7 +528,7 @@ func (m *managerImpl) emptyDirLimitEviction(podStats statsapi.PodStats, pod *v1.
 			used := podVolumeUsed[pod.Spec.Volumes[i].Name]
 			if used != nil && size != nil && size.Sign() == 1 && used.Cmp(*size) > 0 {
 				// the emptyDir usage exceeds the size limit, evict the pod
-				if m.evictPod(pod, 0, fmt.Sprintf(emptyDirMessageFmt, pod.Spec.Volumes[i].Name, size.String()), nil, nil) {
+				if m.evictPod(pod, immediateEvictionGracePeriodSeconds, fmt.Sprintf(emptyDirMessageFmt, pod.Spec.Volumes[i].Name, size.String()), nil, nil) {
 					metrics.Evictions.WithLabelValues(signalEmptyDirFsLimit).Inc()
 					return true
 				}
@@ -553,7 +556,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat
 	if podEphemeralStorageTotalUsage.Cmp(podEphemeralStorageLimit) > 0 {
 		// the total usage of pod exceeds the total size limit of containers, evict the pod
 		message := fmt.Sprintf(podEphemeralStorageMessageFmt, podEphemeralStorageLimit.String())
-		if m.evictPod(pod, 0, message, nil, nil) {
+		if m.evictPod(pod, immediateEvictionGracePeriodSeconds, message, nil, nil) {
 			metrics.Evictions.WithLabelValues(signalEphemeralPodFsLimit).Inc()
 			return true
 		}
@@ -579,7 +582,7 @@ func (m *managerImpl) containerEphemeralStorageLimitEviction(podStats statsapi.P
 
 		if ephemeralStorageThreshold, ok := thresholdsMap[containerStat.Name]; ok {
 			if ephemeralStorageThreshold.Cmp(*containerUsed) < 0 {
-				if m.evictPod(pod, 0, fmt.Sprintf(containerEphemeralStorageMessageFmt, containerStat.Name, ephemeralStorageThreshold.String()), nil, nil) {
+				if m.evictPod(pod, immediateEvictionGracePeriodSeconds, fmt.Sprintf(containerEphemeralStorageMessageFmt, containerStat.Name, ephemeralStorageThreshold.String()), nil, nil) {
 					metrics.Evictions.WithLabelValues(signalEphemeralContainerFsLimit).Inc()
 					return true
 				}
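
For illustration, a minimal standalone sketch of the grace-period selection shown in the synchronize hunk above (not the kubelet code itself): isHard and maxPodGracePeriodSeconds are hypothetical stand-ins for isHardEvictionThreshold(thresholdToReclaim) and m.config.MaxPodGracePeriodSeconds. Hard-eviction and storage-limit evictions now use a 1-second override instead of 0, while soft evictions keep honoring the configured maximum.

package main

import "fmt"

const immediateEvictionGracePeriodSeconds = 1

// gracePeriodForEviction is a hypothetical helper mirroring the selection in
// synchronize: hard thresholds get the immediate 1-second grace period, soft
// thresholds fall back to the configured maximum.
func gracePeriodForEviction(isHard bool, maxPodGracePeriodSeconds int64) int64 {
	gracePeriodOverride := int64(immediateEvictionGracePeriodSeconds)
	if !isHard {
		gracePeriodOverride = maxPodGracePeriodSeconds
	}
	return gracePeriodOverride
}

func main() {
	fmt.Println(gracePeriodForEviction(true, 30))  // hard eviction: 1 (was 0 before this change)
	fmt.Println(gracePeriodForEviction(false, 30)) // soft eviction: 30
}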

Diff for: pkg/kubelet/eviction/eviction_manager_test.go (+131 -26)

@@ -123,10 +123,10 @@ func makePodWithPIDStats(name string, priority int32, processCount uint64) (*v1.
 	return pod, podStats
 }
 
-func makePodWithDiskStats(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*v1.Pod, statsapi.PodStats) {
+func makePodWithDiskStats(name string, priority int32, requests v1.ResourceList, limits v1.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string, volumes []v1.Volume) (*v1.Pod, statsapi.PodStats) {
 	pod := newPod(name, priority, []v1.Container{
 		newContainer(name, requests, limits),
-	}, nil)
+	}, volumes)
 	podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
 	return pod, podStats
 }
@@ -505,7 +505,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 					Quantity: quantityMustParse("2Gi"),
 				},
 			},
-			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 2Gi, available: 1536Mi. ",
+			evictionMessage: "The node was low on resource: ephemeral-storage. Threshold quantity: 2Gi, available: 1536Mi. Container above-requests was using 700Mi, request is 100Mi, has larger consumption of ephemeral-storage. ",
 			podToMakes: []podToMake{
 				{name: "below-requests", requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "1Gi"), rootFsUsed: "900Mi"},
 				{name: "above-requests", requests: newResourceList("", "", "100Mi"), limits: newResourceList("", "", "1Gi"), rootFsUsed: "700Mi"},
@@ -516,7 +516,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			nodeFsStats:      "1Gi",
 			imageFsStats:     "10Gi",
 			containerFsStats: "10Gi",
-			evictionMessage:  "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. ",
+			evictionMessage:  "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. ",
 			thresholdToMonitor: evictionapi.Threshold{
 				Signal:   evictionapi.SignalImageFsAvailable,
 				Operator: evictionapi.OpLessThan,
@@ -537,7 +537,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			nodeFsStats:      "1Gi",
 			imageFsStats:     "100Gi",
 			containerFsStats: "10Gi",
-			evictionMessage:  "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. ",
+			evictionMessage:  "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi.Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. ",
 			thresholdToMonitor: evictionapi.Threshold{
 				Signal:   evictionapi.SignalContainerFsAvailable,
 				Operator: evictionapi.OpLessThan,
@@ -557,7 +557,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			nodeFsStats:      "10Gi",
 			imageFsStats:     "100Gi",
 			containerFsStats: "10Gi",
-			evictionMessage:  "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. ",
+			evictionMessage:  "The node was low on resource: ephemeral-storage. Threshold quantity: 50Gi, available: 10Gi. Container above-requests was using 80Gi, request is 50Gi, has larger consumption of ephemeral-storage. ",
 			thresholdToMonitor: evictionapi.Threshold{
 				Signal:   evictionapi.SignalNodeFsAvailable,
 				Operator: evictionapi.OpLessThan,
@@ -588,7 +588,7 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 		pods := []*v1.Pod{}
 		podStats := map[*v1.Pod]statsapi.PodStats{}
 		for _, podToMake := range podsToMake {
-			pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
+			pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 			pods = append(pods, pod)
 			podStats[pod] = podStat
 		}
@@ -835,8 +835,8 @@ func TestMemoryPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// the best-effort pod should not admit, burstable should
@@ -1106,8 +1106,8 @@ func TestPIDPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod but should have had a grace period override.")
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// try to admit our pod (should fail)
@@ -1336,7 +1336,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	pods := []*v1.Pod{}
 	podStats := map[*v1.Pod]statsapi.PodStats{}
 	for _, podToMake := range podsToMake {
-		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
+		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 		pods = append(pods, pod)
 		podStats[pod] = podStat
 	}
@@ -1379,7 +1379,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	}
 
 	// create a best effort pod to test admission
-	podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, newResourceList("", "", ""), newResourceList("", "", ""), "0Gi", "0Gi", "0Gi")
+	podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, newResourceList("", "", ""), newResourceList("", "", ""), "0Gi", "0Gi", "0Gi", nil)
 
 	// synchronize
 	_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
@@ -1494,8 +1494,8 @@ func TestDiskPressureNodeFs(t *testing.T) {
 		t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// try to admit our pod (should fail)
@@ -1644,8 +1644,8 @@ func TestMinReclaim(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod := *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// reduce memory pressure, but not below the min-reclaim amount
@@ -1668,8 +1668,8 @@ func TestMinReclaim(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// reduce memory pressure and ensure the min-reclaim amount
@@ -1858,7 +1858,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	pods := []*v1.Pod{}
 	podStats := map[*v1.Pod]statsapi.PodStats{}
 	for _, podToMake := range podsToMake {
-		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
+		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 		pods = append(pods, pod)
 		podStats[pod] = podStat
 	}
@@ -2060,8 +2060,8 @@ func TestNodeReclaimFuncs(t *testing.T) {
 		t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod := *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// reduce disk pressure
@@ -2458,8 +2458,8 @@ func TestInodePressureFsInodes(t *testing.T) {
 		t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod = *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 
 	// try to admit our pod (should fail)
@@ -2666,6 +2666,111 @@ func TestStaticCriticalPodsAreNotEvicted(t *testing.T) {
 	}
 }
 
+func TestStorageLimitEvictions(t *testing.T) {
+	volumeSizeLimit := resource.MustParse("1Gi")
+
+	testCases := map[string]struct {
+		pod     podToMake
+		volumes []v1.Volume
+	}{
+		"eviction due to rootfs above limit": {
+			pod: podToMake{name: "rootfs-above-limits", priority: defaultPriority, requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "1Gi"), rootFsUsed: "2Gi"},
+		},
+		"eviction due to logsfs above limit": {
+			pod: podToMake{name: "logsfs-above-limits", priority: defaultPriority, requests: newResourceList("", "", "1Gi"), limits: newResourceList("", "", "1Gi"), logsFsUsed: "2Gi"},
+		},
+		"eviction due to local volume above limit": {
+			pod: podToMake{name: "localvolume-above-limits", priority: defaultPriority, requests: newResourceList("", "", ""), limits: newResourceList("", "", ""), perLocalVolumeUsed: "2Gi"},
+			volumes: []v1.Volume{{
+				Name: "emptyDirVolume",
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{
+						SizeLimit: &volumeSizeLimit,
+					},
+				},
+			}},
+		},
+	}
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			podMaker := makePodWithDiskStats
+			summaryStatsMaker := makeDiskStats
+			podsToMake := []podToMake{
+				tc.pod,
+			}
+			pods := []*v1.Pod{}
+			podStats := map[*v1.Pod]statsapi.PodStats{}
+			for _, podToMake := range podsToMake {
+				pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, tc.volumes)
+				pods = append(pods, pod)
+				podStats[pod] = podStat
+			}
+
+			podToEvict := pods[0]
+			activePodsFunc := func() []*v1.Pod {
+				return pods
+			}
+
+			fakeClock := testingclock.NewFakeClock(time.Now())
+			podKiller := &mockPodKiller{}
+			diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
+			diskGC := &mockDiskGC{err: nil}
+			nodeRef := &v1.ObjectReference{
+				Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: "",
+			}
+
+			config := Config{
+				MaxPodGracePeriodSeconds: 5,
+				PressureTransitionPeriod: time.Minute * 5,
+				Thresholds: []evictionapi.Threshold{
+					{
+						Signal:   evictionapi.SignalNodeFsAvailable,
+						Operator: evictionapi.OpLessThan,
+						Value: evictionapi.ThresholdValue{
+							Quantity: quantityMustParse("1Gi"),
+						},
+					},
+				},
+			}
+
+			diskStat := diskStats{
+				rootFsAvailableBytes:  "200Mi",
+				imageFsAvailableBytes: "200Mi",
+				podStats:              podStats,
+			}
+			summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(diskStat)}
+			manager := &managerImpl{
+				clock:                         fakeClock,
+				killPodFunc:                   podKiller.killPodNow,
+				imageGC:                       diskGC,
+				containerGC:                   diskGC,
+				config:                        config,
+				recorder:                      &record.FakeRecorder{},
+				summaryProvider:               summaryProvider,
+				nodeRef:                       nodeRef,
+				nodeConditionsLastObservedAt:  nodeConditionsObservedAt{},
+				thresholdsFirstObservedAt:     thresholdsObservedAt{},
+				localStorageCapacityIsolation: true,
+			}
+
+			_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
+			if err != nil {
+				t.Fatalf("Manager expects no error but got %v", err)
+			}
+
+			if podKiller.pod == nil {
+				t.Fatalf("Manager should have selected a pod for eviction")
+			}
+			if podKiller.pod != podToEvict {
+				t.Errorf("Manager should have killed pod: %v, but instead killed: %v", podToEvict.Name, podKiller.pod.Name)
+			}
+			if *podKiller.gracePeriodOverride != 1 {
+				t.Errorf("Manager should have evicted with gracePeriodOverride of 1, but used: %v", *podKiller.gracePeriodOverride)
+			}
+		})
+	}
+}
+
 // TestAllocatableMemoryPressure
 func TestAllocatableMemoryPressure(t *testing.T) {
 	podMaker := makePodWithMemoryStats
@@ -2767,8 +2872,8 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
 	}
 	observedGracePeriod := *podKiller.gracePeriodOverride
-	if observedGracePeriod != int64(0) {
-		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+	if observedGracePeriod != int64(1) {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 1, observedGracePeriod)
 	}
 	// reset state
 	podKiller.pod = nil

Diff for: pkg/kubelet/eviction/helpers_test.go (+2 -1)

@@ -3053,8 +3053,9 @@ func newPodDiskStats(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resou
 
 	rootFsUsedBytes := uint64(rootFsUsed.Value())
 	logsUsedBytes := uint64(logsUsed.Value())
-	for range pod.Spec.Containers {
+	for _, container := range pod.Spec.Containers {
 		result.Containers = append(result.Containers, statsapi.ContainerStats{
+			Name: container.Name,
 			Rootfs: &statsapi.FsStats{
 				UsedBytes: &rootFsUsedBytes,
 			},
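
Populating Name here is what lets the test stats attribute usage to a container, which the updated evictionMessage expectations in eviction_manager_test.go rely on. As a hedged sketch of that kind of message construction, the format string below is reconstructed from the asserted messages, not the actual constant used by the eviction manager:

package main

import "fmt"

// containerMessageFmt is a hypothetical stand-in, reconstructed from the
// evictionMessage strings asserted in the tests above.
const containerMessageFmt = "Container %s was using %s, request is %s, has larger consumption of %s. "

func main() {
	// Without ContainerStats.Name the eviction manager cannot name the
	// offending container, so this sentence was previously absent.
	fmt.Printf(containerMessageFmt, "above-requests", "700Mi", "100Mi", "ephemeral-storage")
}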

Diff for: pkg/kubelet/pod_workers.go (+4 -1)

@@ -979,23 +979,26 @@ func calculateEffectiveGracePeriod(status *podSyncStatus, pod *v1.Pod, options *
 	// enforce the restriction that a grace period can only decrease and track whatever our value is,
 	// then ensure a calculated value is passed down to lower levels
 	gracePeriod := status.gracePeriod
+	overridden := false
 	// this value is bedrock truth - the apiserver owns telling us this value calculated by apiserver
 	if override := pod.DeletionGracePeriodSeconds; override != nil {
 		if gracePeriod == 0 || *override < gracePeriod {
 			gracePeriod = *override
+			overridden = true
 		}
 	}
 	// we allow other parts of the kubelet (namely eviction) to request this pod be terminated faster
 	if options != nil {
 		if override := options.PodTerminationGracePeriodSecondsOverride; override != nil {
 			if gracePeriod == 0 || *override < gracePeriod {
 				gracePeriod = *override
+				overridden = true
 			}
 		}
 	}
 	// make a best effort to default this value to the pod's desired intent, in the event
 	// the kubelet provided no requested value (graceful termination?)
-	if gracePeriod == 0 && pod.Spec.TerminationGracePeriodSeconds != nil {
+	if !overridden && gracePeriod == 0 && pod.Spec.TerminationGracePeriodSeconds != nil {
 		gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
 	}
 	// no matter what, we always supply a grace period of 1
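
To make the effect of the overridden flag concrete, here is a minimal standalone sketch of the calculation as it reads after this hunk (a simplification, not the actual function): statusGracePeriod, deletionOverride, evictionOverride, and specGracePeriod stand in for status.gracePeriod, pod.DeletionGracePeriodSeconds, options.PodTerminationGracePeriodSecondsOverride, and pod.Spec.TerminationGracePeriodSeconds. Once an explicit override has been applied, the fallback to the pod's own terminationGracePeriodSeconds is skipped; the final floor of 1 second follows the comment at the end of the hunk.

package main

import "fmt"

// effectiveGracePeriod is a sketch of the logic above, assuming the floor-at-1
// behavior described by the trailing comment in the hunk.
func effectiveGracePeriod(statusGracePeriod int64, deletionOverride, evictionOverride, specGracePeriod *int64) int64 {
	gracePeriod := statusGracePeriod
	overridden := false
	if deletionOverride != nil {
		if gracePeriod == 0 || *deletionOverride < gracePeriod {
			gracePeriod = *deletionOverride
			overridden = true
		}
	}
	if evictionOverride != nil {
		if gracePeriod == 0 || *evictionOverride < gracePeriod {
			gracePeriod = *evictionOverride
			overridden = true
		}
	}
	// only fall back to the pod's own terminationGracePeriodSeconds when no
	// explicit override was applied
	if !overridden && gracePeriod == 0 && specGracePeriod != nil {
		gracePeriod = *specGracePeriod
	}
	// the kubelet always supplies a grace period of at least 1
	if gracePeriod < 1 {
		gracePeriod = 1
	}
	return gracePeriod
}

func main() {
	spec := int64(30)
	evict := int64(1)
	zero := int64(0)
	fmt.Println(effectiveGracePeriod(0, nil, &evict, &spec)) // eviction override wins: 1
	fmt.Println(effectiveGracePeriod(0, nil, nil, &spec))    // no override: pod spec value, 30
	fmt.Println(effectiveGracePeriod(0, &zero, nil, &spec))  // explicit 0 override: stays at the 1s floor instead of reverting to 30
}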
