Commit fc4ac5e

Move image pull backoff test to be with other image pull tests
Signed-off-by: Laura Lorenz <[email protected]>
1 parent: 2479d91 · commit: fc4ac5e

File tree: 2 files changed (+55 -71 lines)
  test/e2e_node/criproxy_test.go
  test/e2e_node/image_pull_test.go


Diff for: test/e2e_node/criproxy_test.go (-71 lines)
@@ -96,44 +96,6 @@ var _ = SIGDescribe(feature.CriProxy, framework.WithSerial(), func() {
 			framework.ExpectNoError(err)
 		})
 
-		ginkgo.It("Image pull retry backs off on error.", func(ctx context.Context) {
-			// inject PullImage failed to trigger backoff
-			expectedErr := fmt.Errorf("PullImage failed")
-			err := addCRIProxyInjector(e2eCriProxy, func(apiName string) error {
-				if apiName == criproxy.PullImage {
-					return expectedErr
-				}
-				return nil
-			})
-			framework.ExpectNoError(err)
-
-			pod := e2epod.NewPodClient(f).Create(ctx, newImageBackoffPod())
-			framework.Logf("pod name: %s", pod.Name)
-			podErr := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "ImagePullBackOff", 1*time.Minute, func(pod *v1.Pod) (bool, error) {
-				if len(pod.Status.ContainerStatuses) > 0 && pod.Status.Reason == images.ErrImagePullBackOff.Error() {
-					return true, nil
-				}
-				return false, nil
-			})
-			gomega.Expect(podErr).To(gomega.HaveOccurred())
-
-			eventMsg, err := getFailedToPullImageMsg(ctx, f, pod.Name)
-			framework.ExpectNoError(err)
-			isExpectedErrMsg := strings.Contains(eventMsg, expectedErr.Error())
-			gomega.Expect(isExpectedErrMsg).To(gomega.BeTrueBecause("we injected an exception into the PullImage interface of the cri proxy"))
-
-			// Wait for ~60s worth of backoffs to occur so we can confirm the backoff growth.
-			podErr = e2epod.WaitForPodContainerStarted(ctx, f.ClientSet, f.Namespace.Name, pod.Name, 0, 1*time.Minute)
-			gomega.Expect(podErr).To(gomega.HaveOccurred(), "Expected container not to start from repeatedly backing off image pulls")
-
-			e, err := getImageBackoffs(ctx, f, pod.Name)
-			framework.ExpectNoError(err)
-			// 3 would take 10s best case
-			gomega.Expect(e.Count).Should(gomega.BeNumerically(">", 3))
-			// 6 would take 150s best case
-			gomega.Expect(e.Count).Should(gomega.BeNumerically("<=", 6))
-
-		})
 	})
 
 	ginkgo.Context("Inject a pull image timeout exception into the CriProxy", func() {
@@ -185,21 +147,7 @@ func getFailedToPullImageMsg(ctx context.Context, f *framework.Framework, podNam
 	return "", fmt.Errorf("failed to find FailedToPullImage event for pod: %s", podName)
 }
 
-func getImageBackoffs(ctx context.Context, f *framework.Framework, podName string) (v1.Event, error) {
-
-	event := v1.Event{}
-	e, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-	if err != nil {
-		return event, err
-	}
 
-	for _, event := range e.Items {
-		if event.InvolvedObject.Name == podName && event.Reason == kubeletevents.PullingImage {
-			return event, nil
-		}
-	}
-	return event, nil
-}
 
 func getPodImagePullDuration(ctx context.Context, f *framework.Framework, podName string) (time.Duration, error) {
 	events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
@@ -226,25 +174,6 @@ func getPodImagePullDuration(ctx context.Context, f *framework.Framework, podNam
 	return endTime.Sub(startTime), nil
 }
 
-func newImageBackoffPod() *v1.Pod {
-	podName := "image-backoff" + string(uuid.NewUUID())
-	pod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: podName,
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Image:           imageutils.GetPauseImageName(),
-					Name:            podName,
-					ImagePullPolicy: v1.PullAlways,
-				},
-			},
-		},
-	}
-	return pod
-}
-
 func newPullImageAlwaysPod() *v1.Pod {
 	podName := "cri-proxy-test-" + string(uuid.NewUUID())
 	pod := &v1.Pod{

Diff for: test/e2e_node/image_pull_test.go (+55 lines)
@@ -34,6 +34,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubeletevents "k8s.io/kubernetes/pkg/kubelet/events"
+	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -230,6 +231,45 @@ var _ = SIGDescribe("Pull Image", feature.CriProxy, framework.WithSerial(), func
 		})
 
 	})
+
+	ginkgo.It("Image pull retry backs off on error.", func(ctx context.Context) {
+		// inject PullImage failed to trigger backoff
+		expectedErr := fmt.Errorf("PullImage failed")
+		err := addCRIProxyInjector(e2eCriProxy, func(apiName string) error {
+			if apiName == criproxy.PullImage {
+				return expectedErr
+			}
+			return nil
+		})
+		framework.ExpectNoError(err)
+
+		pod := e2epod.NewPodClient(f).Create(ctx, newPullImageAlwaysPod())
+		podErr := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "ImagePullBackOff", 1*time.Minute, func(pod *v1.Pod) (bool, error) {
+			if len(pod.Status.ContainerStatuses) > 0 && pod.Status.Reason == images.ErrImagePullBackOff.Error() {
+				return true, nil
+			}
+			return false, nil
+		})
+		gomega.Expect(podErr).To(gomega.HaveOccurred())
+
+		eventMsg, err := getFailedToPullImageMsg(ctx, f, pod.Name)
+		framework.ExpectNoError(err)
+		isExpectedErrMsg := strings.Contains(eventMsg, expectedErr.Error())
+		gomega.Expect(isExpectedErrMsg).To(gomega.BeTrueBecause("we injected an exception into the PullImage interface of the cri proxy"))
+
+		// Wait for ~60s worth of backoffs to occur so we can confirm the backoff growth.
+		podErr = e2epod.WaitForPodContainerStarted(ctx, f.ClientSet, f.Namespace.Name, pod.Name, 0, 1*time.Minute)
+		gomega.Expect(podErr).To(gomega.HaveOccurred(), "Expected container not to start from repeatedly backing off image pulls")
+
+		e, err := getImagePullAttempts(ctx, f, pod.Name)
+		framework.ExpectNoError(err)
+		// 3 would take 10s best case
+		gomega.Expect(e.Count).Should(gomega.BeNumerically(">", 3))
+		// 6 would take 150s best case
+		gomega.Expect(e.Count).Should(gomega.BeNumerically("<=", 6))
+
+	})
+
 })
 
 func getPodImagePullDurations(ctx context.Context, f *framework.Framework, testpods []*v1.Pod) (map[string]*pulledStruct, map[string]metav1.Time, map[string]metav1.Time, error) {
@@ -343,3 +383,18 @@ func getDurationsFromPulledEventMsg(msg string) (*pulledStruct, error) {
 		pulledIncludeWaitingDuration: pulledIncludeWaitingDuration,
 	}, nil
 }
+
+func getImagePullAttempts(ctx context.Context, f *framework.Framework, podName string) (v1.Event, error) {
+	event := v1.Event{}
+	e, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return event, err
+	}
+
+	for _, event := range e.Items {
+		if event.InvolvedObject.Name == podName && event.Reason == kubeletevents.PullingImage {
+			return event, nil
+		}
+	}
+	return event, nil
+}
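
Note on the ">3" / "<=6" bounds in the moved test: the kubelet backs off repeated image pull failures exponentially, so only a bounded number of PullingImage events can accumulate in the roughly two minutes the test observes. The sketch below is a minimal illustration of that arithmetic, assuming an initial 10s backoff that doubles after every failed pull and is capped at 300s (the usual kubelet defaults; these values are an assumption here, not part of the diff above).

package main

import (
	"fmt"
	"time"
)

// pullSchedule returns the best-case start time of each image pull attempt
// under an assumed exponential backoff: initial delay `base`, doubling after
// every failed pull, capped at `max`.
func pullSchedule(attempts int, base, max time.Duration) []time.Duration {
	var (
		schedule []time.Duration
		elapsed  time.Duration
		delay    = base
	)
	for i := 0; i < attempts; i++ {
		schedule = append(schedule, elapsed)
		elapsed += delay
		delay *= 2
		if delay > max {
			delay = max
		}
	}
	return schedule
}

func main() {
	// With a 10s base and 300s cap, attempts land at roughly
	// 0s, 10s, 30s, 70s, 150s, 310s, ... so only a handful of
	// PullingImage events can occur while the test waits, which
	// is what the >3 and <=6 expectations on e.Count encode.
	for i, t := range pullSchedule(6, 10*time.Second, 300*time.Second) {
		fmt.Printf("attempt %d at ~%v\n", i+1, t)
	}
}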
