OCPBUGS-52169: Workload partitioning of static init containers #2224

Open · wants to merge 1 commit into master
21 changes: 18 additions & 3 deletions pkg/kubelet/managed/managed.go
@@ -117,14 +117,29 @@ func GenerateResourceName(workloadName string) v1.ResourceName {
 func updateContainers(workloadName string, pod *v1.Pod) error {
 	updateContainer := func(container *v1.Container) error {
 		if container.Resources.Requests == nil {
-			return fmt.Errorf("managed container %v does not have Resource.Requests", container.Name)
+			// Nothing to modify, but that is OK, it will not
+			// change the QoS class of the modified Pod
+			return nil
 		}
-		if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok {
+
+		_, cpuOk := container.Resources.Requests[v1.ResourceCPU]
+		_, memoryOk := container.Resources.Requests[v1.ResourceMemory]
+
+		// When both cpu and memory requests are missing, there is nothing
+		// to do
+		if !cpuOk && !memoryOk {
+			return nil
+		}
+
+		// Both memory and cpu have to be set to make sure stripping them
+		// will not change the QoS class of the Pod
+		if !cpuOk {
 			return fmt.Errorf("managed container %v does not have cpu requests", container.Name)
 		}
-		if _, ok := container.Resources.Requests[v1.ResourceMemory]; !ok {
+		if !memoryOk {
 			return fmt.Errorf("managed container %v does not have memory requests", container.Name)
 		}
+
 		if container.Resources.Limits == nil {
 			container.Resources.Limits = v1.ResourceList{}
 		}

Review comment (Member), on the new `if !cpuOk && !memoryOk {` line: Isn't this missing the case where only limits are set and requests are implied from limits? (A sketch of that case follows this hunk.)
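To make the reviewer's question above concrete, here is a minimal standalone sketch (not part of the PR; the container name, image, and values are invented) of a container that declares only limits. Kubernetes normally defaults the requests from the limits during API defaulting, so Requests is usually non-nil by the time updateContainer runs; but if that defaulting has not been applied to a static pod manifest, Requests is nil and the patched code would skip the container even though it is not best-effort.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical container: the manifest declares only limits.
	c := v1.Container{
		Name:  "limits-only",
		Image: "test/nginx",
		Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
			// Requests deliberately left unset; API defaulting would
			// normally copy them from Limits.
		},
	}

	// Before defaulting runs, Requests is nil, so the patched
	// updateContainer returns early and performs no partitioning,
	// even though this container would be Guaranteed afterwards.
	fmt.Println("requests are nil:", c.Resources.Requests == nil)
}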
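The in-line comments in the hunk also rest on the QoS rules: skipping a container with no cpu or memory requests leaves a best-effort pod best-effort, while stripping only one of the two requests from a burstable container could change the pod's class. The helper below is my own rough, single-container approximation of those rules (not the kubelet's implementation; the name simplifiedQOS is invented), just to show why the patch treats cpu and memory requests as a pair.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// simplifiedQOS is a rough, single-container approximation of the Kubernetes
// QoS rules: no requests or limits at all is BestEffort, requests equal to
// limits for both cpu and memory is Guaranteed, everything else is Burstable.
func simplifiedQOS(r v1.ResourceRequirements) v1.PodQOSClass {
	if len(r.Requests) == 0 && len(r.Limits) == 0 {
		return v1.PodQOSBestEffort
	}
	for _, res := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} {
		req, hasReq := r.Requests[res]
		lim, hasLim := r.Limits[res]
		if !hasReq || !hasLim || req.Cmp(lim) != 0 {
			return v1.PodQOSBurstable
		}
	}
	return v1.PodQOSGuaranteed
}

func main() {
	// A container that requests only cpu is Burstable ...
	burstable := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("20m")},
	}
	fmt.Println(simplifiedQOS(burstable)) // Burstable

	// ... and stripping that lone request would flip it to BestEffort,
	// which is exactly the class change the patch refuses to cause.
	fmt.Println(simplifiedQOS(v1.ResourceRequirements{})) // BestEffort
}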
185 changes: 171 additions & 14 deletions pkg/kubelet/managed/managed_test.go
@@ -19,17 +19,6 @@ func TestModifyStaticPodForPinnedManagementErrorStates(t *testing.T) {
 		pod *v1.Pod
 		expectedError error
 	}{
-		{
-			pod: createPod(workloadAnnotations, nil,
-				&v1.Container{
-					Name: "nginx",
-					Image: "test/image",
-					Resources: v1.ResourceRequirements{
-						Requests: nil,
-					},
-				}),
-			expectedError: fmt.Errorf("managed container nginx does not have Resource.Requests"),
-		},
 		{
 			pod: createPod(workloadAnnotations, nil,
 				&v1.Container{
@@ -129,6 +118,7 @@ func TestStaticPodManaged(t *testing.T) {
 		pod *v1.Pod
 		expectedAnnotations map[string]string
 		isGuaranteed bool
+		isBestEffort bool
 	}{
 		{
 			pod: &v1.Pod{
@@ -270,6 +260,164 @@
"resources.workload.openshift.io/c1": `{"cpushares":20,"cpulimit":100}`,
},
},
{
pod: &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
Annotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "test/nginx",
Resources: v1.ResourceRequirements{},
},
},
SecurityContext: &v1.PodSecurityContext{},
},
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expectedAnnotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
isBestEffort: true,
},
{
pod: &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
Annotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "test/nginx",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("dummy"): resource.MustParse("20m"),
},
},
},
},
SecurityContext: &v1.PodSecurityContext{},
},
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expectedAnnotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
isBestEffort: true,
},
{
pod: &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
Annotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "test/nginx",
Resources: v1.ResourceRequirements{},
},
},
InitContainers: []v1.Container{
{
Name: "ic1",
Image: "test/nginx",
Resources: v1.ResourceRequirements{},
},
},
SecurityContext: &v1.PodSecurityContext{},
},
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expectedAnnotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
isBestEffort: true,
},
{
pod: &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
Annotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Image: "test/nginx",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("dummy"): resource.MustParse("20m"),
},
},
},
},
InitContainers: []v1.Container{
{
Name: "ic1",
Image: "test/nginx",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName("dummy"): resource.MustParse("20m"),
},
},
},
},
SecurityContext: &v1.PodSecurityContext{},
},
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expectedAnnotations: map[string]string{
"target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`,
},
isBestEffort: true,
},
{
pod: &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -481,15 +629,24 @@ func TestStaticPodManaged(t *testing.T) {
 			if container.Resources.Requests.Cpu().String() != "0" && !tc.isGuaranteed {
 				t.Errorf("cpu requests should be 0 got %v", container.Resources.Requests.Cpu().String())
 			}
-			if container.Resources.Requests.Memory().String() == "0" && !tc.isGuaranteed {
+			if container.Resources.Requests.Memory().String() == "0" && !tc.isGuaranteed && !tc.isBestEffort {
 				t.Errorf("memory requests were %v but should be %v", container.Resources.Requests.Memory().String(), container.Resources.Requests.Memory().String())
 			}
-			if _, exists := container.Resources.Requests[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed {
+			if container.Resources.Requests.Memory().String() != "0" && !tc.isGuaranteed && tc.isBestEffort {
+				t.Errorf("memory requests should be 0 got %v", container.Resources.Requests.Memory().String())
+			}
+			if _, exists := container.Resources.Requests[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed && !tc.isBestEffort {
 				t.Errorf("managed capacity label missing from pod %v and container %v", tc.pod.Name, container.Name)
 			}
-			if _, exists := container.Resources.Limits[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed {
+			if _, exists := container.Resources.Limits[GenerateResourceName(workloadName)]; !exists && !tc.isGuaranteed && !tc.isBestEffort {
 				t.Errorf("managed capacity label missing from pod %v and container %v limits", tc.pod.Name, container.Name)
 			}
+			if _, exists := container.Resources.Requests[GenerateResourceName(workloadName)]; exists && tc.isBestEffort {
+				t.Errorf("managed capacity label present in best-effort pod %v and container %v requests", tc.pod.Name, container.Name)
+			}
+			if _, exists := container.Resources.Limits[GenerateResourceName(workloadName)]; exists && tc.isBestEffort {
+				t.Errorf("managed capacity label present in best-effort pod %v and container %v limits", tc.pod.Name, container.Name)
+			}
 		}
 	}
 }

Review comment (Author), on the `t.Errorf("memory requests were %v but should be %v", ...)` line: Note: I did not touch this, but the Errorf is broken, it uses the same value for both the left and the right side. (A sketch of this follows below.)
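To illustrate the author's note above, here is a tiny standalone sketch of the message problem; the reworded message at the end is hypothetical and not something this PR changes.

package main

import "fmt"

func main() {
	got := "0" // stand-in for container.Resources.Requests.Memory().String()

	// As currently written in the test: both %v verbs receive the same
	// expression, so "were X but should be X" can never show a useful
	// expected value.
	fmt.Printf("memory requests were %v but should be %v\n", got, got)

	// One possible rewording (hypothetical): state the condition that
	// actually failed instead of an expected value.
	fmt.Printf("memory requests should not be 0, got %v\n", got)
}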