@@ -118,6 +118,7 @@ import (
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/userns"
	"k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/pkg/kubelet/util/manager"
	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
@@ -2829,38 +2830,47 @@ func isPodResizeInProgress(pod *v1.Pod, podStatus *kubecontainer.PodStatus) bool
// canResizePod determines if the requested resize is currently feasible.
// pod should hold the desired (pre-allocated) spec.
// Returns true if the resize can proceed.
-func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus) {
+func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus, string) {
	if goos == "windows" {
-		return false, v1.PodResizeStatusInfeasible
+		return false, v1.PodResizeStatusInfeasible, "Resizing Windows pods is not supported"
	}

	if v1qos.GetPodQOS(pod) == v1.PodQOSGuaranteed && !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingExclusiveCPUs) {
		if utilfeature.DefaultFeatureGate.Enabled(features.CPUManager) {
			if kl.containerManager.GetNodeConfig().CPUManagerPolicy == "static" {
-				klog.V(3).InfoS("Resize is infeasible for Guaranteed Pods alongside CPU Manager static policy")
-				return false, v1.PodResizeStatusInfeasible
+				msg := "Resize is infeasible for Guaranteed Pods alongside CPU Manager static policy"
+				klog.V(3).InfoS(msg, "pod", format.Pod(pod))
+				return false, v1.PodResizeStatusInfeasible, msg
			}
		}
		if utilfeature.DefaultFeatureGate.Enabled(features.MemoryManager) {
			if kl.containerManager.GetNodeConfig().ExperimentalMemoryManagerPolicy == "static" {
-				klog.V(3).InfoS("Resize is infeasible for Guaranteed Pods alongside Memory Manager static policy")
-				return false, v1.PodResizeStatusInfeasible
+				msg := "Resize is infeasible for Guaranteed Pods alongside Memory Manager static policy"
+				klog.V(3).InfoS(msg, "pod", format.Pod(pod))
+				return false, v1.PodResizeStatusInfeasible, msg
			}
		}
	}

	node, err := kl.getNodeAnyWay()
	if err != nil {
		klog.ErrorS(err, "getNodeAnyway function failed")
-		return false, ""
+		return false, "", ""
	}
	cpuAvailable := node.Status.Allocatable.Cpu().MilliValue()
	memAvailable := node.Status.Allocatable.Memory().Value()
	cpuRequests := resource.GetResourceRequest(pod, v1.ResourceCPU)
	memRequests := resource.GetResourceRequest(pod, v1.ResourceMemory)
	if cpuRequests > cpuAvailable || memRequests > memAvailable {
-		klog.V(3).InfoS("Resize is not feasible as request exceeds allocatable node resources", "pod", klog.KObj(pod))
-		return false, v1.PodResizeStatusInfeasible
+		var msg string
+		if memRequests > memAvailable {
+			msg = fmt.Sprintf("memory, requested: %d, capacity: %d", memRequests, memAvailable)
+		} else {
+			msg = fmt.Sprintf("cpu, requested: %d, capacity: %d", cpuRequests, cpuAvailable)
+		}
+		msg = "Node didn't have enough capacity: " + msg
+		klog.V(3).InfoS(msg, "pod", klog.KObj(pod))
+		return false, v1.PodResizeStatusInfeasible, msg
	}

	// Treat the existing pod needing resize as a new pod with desired resources seeking admit.
@@ -2871,10 +2881,10 @@ func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus) {
	if ok, failReason, failMessage := kl.canAdmitPod(allocatedPods, pod); !ok {
		// Log reason and return. Let the next sync iteration retry the resize
		klog.V(3).InfoS("Resize cannot be accommodated", "pod", klog.KObj(pod), "reason", failReason, "message", failMessage)
-		return false, v1.PodResizeStatusDeferred
+		return false, v1.PodResizeStatusDeferred, failMessage
	}

-	return true, v1.PodResizeStatusInProgress
+	return true, v1.PodResizeStatusInProgress, ""
}

// handlePodResourcesResize returns the "allocated pod", which should be used for all resource
@@ -2899,7 +2909,7 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
	kl.podResizeMutex.Lock()
	defer kl.podResizeMutex.Unlock()
	// Desired resources != allocated resources. Can we update the allocation to the desired resources?
-	fit, resizeStatus := kl.canResizePod(pod)
+	fit, resizeStatus, resizeMsg := kl.canResizePod(pod)
	if fit {
		// Update pod resource allocation checkpoint
		if err := kl.statusManager.SetPodAllocation(pod); err != nil {
@@ -2925,6 +2935,14 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
	}
	if resizeStatus != "" {
		kl.statusManager.SetPodResizeStatus(pod.UID, resizeStatus)
+		if resizeMsg != "" {
+			switch resizeStatus {
+			case v1.PodResizeStatusDeferred:
+				kl.recorder.Eventf(pod, v1.EventTypeWarning, events.ResizeDeferred, resizeMsg)
+			case v1.PodResizeStatusInfeasible:
+				kl.recorder.Eventf(pod, v1.EventTypeWarning, events.ResizeInfeasible, resizeMsg)
+			}
+		}
	}
	return allocatedPod, nil
}
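
In short: canResizePod now returns a third value, a human-readable message explaining a negative decision, and handlePodResourcesResize surfaces that message as a Warning event when the resize is Deferred or Infeasible. Below is a standalone sketch of that control flow; the types, constants, and helpers in it are simplified stand-ins for illustration, not the kubelet's actual APIs.

```go
package main

import "fmt"

// resizeStatus stands in for v1.PodResizeStatus.
type resizeStatus string

const (
	resizeDeferred   resizeStatus = "Deferred"
	resizeInfeasible resizeStatus = "Infeasible"
	resizeInProgress resizeStatus = "InProgress"
)

// canResize mirrors the new three-value contract of canResizePod:
// feasibility, a status, and a message describing why a resize cannot proceed.
func canResize(cpuRequested, cpuAllocatable int64) (bool, resizeStatus, string) {
	if cpuRequested > cpuAllocatable {
		msg := fmt.Sprintf("Node didn't have enough capacity: cpu, requested: %d, capacity: %d",
			cpuRequested, cpuAllocatable)
		return false, resizeInfeasible, msg
	}
	return true, resizeInProgress, ""
}

// recordWarning stands in for recorder.Eventf(pod, v1.EventTypeWarning, reason, msg).
func recordWarning(reason, msg string) {
	fmt.Printf("Warning  %s  %s\n", reason, msg)
}

func main() {
	fit, status, msg := canResize(6000, 4000)
	if !fit && msg != "" {
		// Only Deferred and Infeasible decisions surface a warning event,
		// matching the switch added in handlePodResourcesResize.
		switch status {
		case resizeDeferred:
			recordWarning("ResizeDeferred", msg)
		case resizeInfeasible:
			recordWarning("ResizeInfeasible", msg)
		}
	}
}
```

Running the sketch prints a single ResizeInfeasible warning, analogous to the event the kubelet would now record on the pod.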