
Commit 52ab771

Merge pull request kubernetes#130917 from vinaykul/ippr-mem-req-ucr
Invoke UpdateContainerResources or trigger container restarts when memory requests are resized
2 parents 322083c + d62e766

File tree

8 files changed: +320 -115 lines

pkg/kubelet/kubelet.go (+1 -2)
@@ -2994,7 +2994,6 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
 // for any running containers. Specifically, the following differences are ignored:
 // - Non-resizable containers: non-restartable init containers, ephemeral containers
 // - Non-resizable resources: only CPU & memory are resizable
-// - Non-actuated resources: memory requests are not actuated
 // - Non-running containers: they will be sized correctly when (re)started
 func (kl *Kubelet) isPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
     return !podutil.VisitContainers(&allocatedPod.Spec, podutil.InitContainers|podutil.Containers,
@@ -3012,9 +3011,9 @@ func (kl *Kubelet) isPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubeco
             actuatedResources, _ := kl.allocationManager.GetActuatedResources(allocatedPod.UID, allocatedContainer.Name)
             allocatedResources := allocatedContainer.Resources

-            // Memory requests are excluded since they don't need to be actuated.
             return allocatedResources.Requests[v1.ResourceCPU].Equal(actuatedResources.Requests[v1.ResourceCPU]) &&
                 allocatedResources.Limits[v1.ResourceCPU].Equal(actuatedResources.Limits[v1.ResourceCPU]) &&
+                allocatedResources.Requests[v1.ResourceMemory].Equal(actuatedResources.Requests[v1.ResourceMemory]) &&
                 allocatedResources.Limits[v1.ResourceMemory].Equal(actuatedResources.Limits[v1.ResourceMemory])
         })
 }
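The net effect of this hunk: memory requests now participate in the allocated-vs-actuated comparison, so a pending memory-request resize keeps the pod marked as resize-in-progress. A minimal, self-contained sketch of just that comparison (`resizeComplete` is a hypothetical helper, not the kubelet's API):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// resizeComplete mirrors the per-container check after this commit: CPU
// requests/limits AND memory requests/limits must all match between the
// allocated (desired) and actuated (applied) resources.
func resizeComplete(allocated, actuated v1.ResourceRequirements) bool {
	return allocated.Requests[v1.ResourceCPU].Equal(actuated.Requests[v1.ResourceCPU]) &&
		allocated.Limits[v1.ResourceCPU].Equal(actuated.Limits[v1.ResourceCPU]) &&
		allocated.Requests[v1.ResourceMemory].Equal(actuated.Requests[v1.ResourceMemory]) &&
		allocated.Limits[v1.ResourceMemory].Equal(actuated.Limits[v1.ResourceMemory])
}

func main() {
	allocated := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")},
	}
	actuated := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")},
	}
	// Previously the memory-request mismatch was ignored; now it reports the
	// resize as still in progress.
	fmt.Println(resizeComplete(allocated, actuated)) // false
}
```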

pkg/kubelet/kubelet_pods.go (+7)
@@ -2111,6 +2111,13 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
         } else {
             preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Requests, resources.Requests)
         }
+        // TODO(tallclair,vinaykul,InPlacePodVerticalScaling): Investigate defaulting to actuated resources instead of allocated resources above
+        if _, exists := resources.Requests[v1.ResourceMemory]; exists {
+            // Get memory requests from actuated resources
+            if actuatedResources, found := kl.allocationManager.GetActuatedResources(pod.UID, allocatedContainer.Name); found {
+                resources.Requests[v1.ResourceMemory] = *actuatedResources.Requests.Memory()
+            }
+        }
     }

     return resources
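For status reporting, a memory request that is present is now overwritten with the actuated value, so the reported container resources reflect what was actually applied. A sketch under simplified assumptions (`getActuatedResources` is a hypothetical stand-in for `kl.allocationManager.GetActuatedResources`):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// getActuatedResources stands in for kl.allocationManager.GetActuatedResources.
func getActuatedResources() (v1.ResourceRequirements, bool) {
	return v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("200Mi")},
	}, true
}

func main() {
	// The allocated (desired) value the status would otherwise report.
	resources := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")},
	}
	// Mirror of the new branch: only a memory request that is already present
	// gets replaced, and only when actuated resources are known.
	if _, exists := resources.Requests[v1.ResourceMemory]; exists {
		if actuated, found := getActuatedResources(); found {
			resources.Requests[v1.ResourceMemory] = *actuated.Requests.Memory()
		}
	}
	fmt.Println(resources.Requests.Memory().String()) // 200Mi
}
```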

pkg/kubelet/kubelet_test.go (+1 -2)
@@ -3865,8 +3865,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
             actuated:  &testResources{100, 200, 150, 200},
             isRunning: true,
         }},
-        // Memory requests aren't actuated and should be ignored.
-        expectHasResize: false,
+        expectHasResize: true,
     }, {
         name: "simple resized container/cpu+mem req",
         containers: []testContainer{{
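The expectation flips because memory requests are now actuated and compared in isPodResizeInProgress, so a request mismatch in this case legitimately reports a resize still in progress.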

pkg/kubelet/kuberuntime/kuberuntime_manager.go (+9 -6)
@@ -634,8 +634,8 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
         return true
     }

-    determineContainerResize := func(rName v1.ResourceName, specValue, statusValue int64) (resize, restart bool) {
-        if specValue == statusValue {
+    determineContainerResize := func(rName v1.ResourceName, desiredValue, currentValue int64) (resize, restart bool) {
+        if desiredValue == currentValue {
             return false, false
         }
         for _, policy := range container.ResizePolicy {
@@ -646,7 +646,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
         // If a resource policy isn't set, the implicit default is NotRequired.
         return true, false
     }
-    markContainerForUpdate := func(rName v1.ResourceName, specValue, statusValue int64) {
+    markContainerForUpdate := func(rName v1.ResourceName, desiredValue, currentValue int64) {
         cUpdateInfo := containerToUpdateInfo{
             container:       &container,
             kubeContainerID: kubeContainerStatus.ID,
@@ -655,18 +655,19 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
         }
         // Order the container updates such that resource decreases are applied before increases
         switch {
-        case specValue > statusValue: // append
+        case desiredValue > currentValue: // append
             changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)
-        case specValue < statusValue: // prepend
+        case desiredValue < currentValue: // prepend
             changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})
             copy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])
             changes.ContainersToUpdate[rName][0] = cUpdateInfo
         }
     }
     resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
+    resizeMemReq, restartMemReq := determineContainerResize(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
     resizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
     resizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
-    if restartCPULim || restartCPUReq || restartMemLim {
+    if restartCPULim || restartCPUReq || restartMemLim || restartMemReq {
         // resize policy requires this container to restart
         changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{
             name: kubeContainerStatus.Name,
@@ -683,6 +684,8 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
     } else {
         if resizeMemLim {
             markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
+        } else if resizeMemReq {
+            markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
         }
         if resizeCPULim {
             markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
pkg/kubelet/kuberuntime/kuberuntime_manager_test.go (+90)
@@ -2897,6 +2897,96 @@ func TestComputePodActionsForPodResize(t *testing.T) {
             return &pa
         },
     },
+    "Update container memory (requests only) with RestartContainer policy for memory": {
+        setupFn: func(pod *v1.Pod) {
+            c := &pod.Spec.Containers[2]
+            c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
+            c.Resources = v1.ResourceRequirements{
+                Limits:   v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
+                Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
+            }
+            setupActuatedResources(pod, c, v1.ResourceRequirements{
+                Limits: v1.ResourceList{
+                    v1.ResourceCPU:    cpu200m.DeepCopy(),
+                    v1.ResourceMemory: mem200M.DeepCopy(),
+                },
+                Requests: v1.ResourceList{
+                    v1.ResourceCPU:    cpu100m.DeepCopy(),
+                    v1.ResourceMemory: mem200M.DeepCopy(),
+                },
+            })
+        },
+        getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
+            kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
+            killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
+            killMap[kcs.ID] = containerToKillInfo{
+                container: &pod.Spec.Containers[2],
+                name:      pod.Spec.Containers[2].Name,
+            }
+            pa := podActions{
+                SandboxID:          podStatus.SandboxStatuses[0].Id,
+                ContainersToStart:  []int{2},
+                ContainersToKill:   killMap,
+                ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
+                UpdatePodResources: true,
+            }
+            return &pa
+        },
+    },
+    "Update container memory (requests only) with RestartNotRequired policy for memory": {
+        setupFn: func(pod *v1.Pod) {
+            c := &pod.Spec.Containers[2]
+            c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired}
+            c.Resources = v1.ResourceRequirements{
+                Limits:   v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
+                Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
+            }
+            setupActuatedResources(pod, c, v1.ResourceRequirements{
+                Limits: v1.ResourceList{
+                    v1.ResourceCPU:    cpu200m.DeepCopy(),
+                    v1.ResourceMemory: mem200M.DeepCopy(),
+                },
+                Requests: v1.ResourceList{
+                    v1.ResourceCPU:    cpu100m.DeepCopy(),
+                    v1.ResourceMemory: mem200M.DeepCopy(),
+                },
+            })
+        },
+        getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
+            kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
+            killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
+            killMap[kcs.ID] = containerToKillInfo{
+                container: &pod.Spec.Containers[2],
+                name:      pod.Spec.Containers[2].Name,
+            }
+            pa := podActions{
+                SandboxID:         podStatus.SandboxStatuses[0].Id,
+                ContainersToStart: []int{},
+                ContainersToKill:  getKillMap(pod, podStatus, []int{}),
+                ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
+                    v1.ResourceMemory: {
+                        {
+                            container:       &pod.Spec.Containers[2],
+                            kubeContainerID: kcs.ID,
+                            desiredContainerResources: containerResources{
+                                memoryLimit:   mem200M.Value(),
+                                memoryRequest: mem100M.Value(),
+                                cpuLimit:      cpu200m.MilliValue(),
+                                cpuRequest:    cpu100m.MilliValue(),
+                            },
+                            currentContainerResources: &containerResources{
+                                memoryLimit:   mem200M.Value(),
+                                memoryRequest: mem200M.Value(),
+                                cpuLimit:      cpu200m.MilliValue(),
+                                cpuRequest:    cpu100m.MilliValue(),
+                            },
+                        },
+                    },
+                },
+            }
+            return &pa
+        },
+    },
 } {
     t.Run(desc, func(t *testing.T) {
         pod, status := makeBasePodAndStatus()
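Together, the two new cases pin down both sides of the new behavior: with a RestartContainer memory policy, a request-only resize kills and restarts the container (ContainersToStart: []int{2}, UpdatePodResources: true), while with RestartNotRequired it yields an in-place memory update whose desired memoryRequest (mem100M) differs from the current actuated value (mem200M).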
