Skip to content

Commit 4db8e8c

Browse files
committed
Add configurable tolerance e2e test.
1 parent ac10713 commit 4db8e8c

File tree

3 files changed

+88
-26
lines changed

3 files changed

+88
-26
lines changed

test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go

+77-26
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import (
2121
"time"
2222

2323
autoscalingv2 "k8s.io/api/autoscaling/v2"
24+
"k8s.io/kubernetes/pkg/features"
2425
"k8s.io/kubernetes/test/e2e/feature"
2526
"k8s.io/kubernetes/test/e2e/framework"
2627
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
@@ -30,37 +31,29 @@ import (
3031
"github.com/onsi/gomega"
3132
)
3233

33-
var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (non-default behavior)", func() {
34-
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
35-
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
34+
// Shared tuning constants for the HPA behavior e2e tests. The durations
// below add up to the worst-case end-to-end latency budgets the tests
// wait for before asserting that scaling did (or did not) happen.
const (
	// Name of the ResourceConsumer deployment the HPA targets.
	hpaName = "consumer"

	// CPU request (millicores) per consumer pod, and the HPA target
	// utilization percentage applied to that request.
	podCPURequest               = 500
	targetCPUUtilizationPercent = 25

	// Metrics-availability budget: one full metrics window reflecting the
	// new usage, plus one window during which stale usage may still be
	// reported, plus the delay before a new pod's metrics appear at all.
	// NOTE(review): these mirror the metrics-server/HPA defaults — confirm
	// against the cluster configuration if the test flakes.
	fullWindowOfNewUsage     = 30 * time.Second
	windowWithOldUsagePasses = 30 * time.Second
	newPodMetricsDelay       = 15 * time.Second
	metricsAvailableDelay    = fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay

	// Worst-case time for the HPA controller to act once fresh metrics are
	// available: one reconciliation period plus actuation slack.
	hpaReconciliationInterval = 15 * time.Second
	actuationDelay            = 10 * time.Second
	maxHPAReactionTime        = metricsAvailableDelay + hpaReconciliationInterval + actuationDelay

	// Worst-case time for the ResourceConsumer to start consuming the
	// requested CPU and for the replica poll to observe the result.
	maxConsumeCPUDelay          = 30 * time.Second
	waitForReplicasPollInterval = 20 * time.Second
	maxResourceConsumerDelay    = maxConsumeCPUDelay + waitForReplicasPollInterval
)
6053

61-
maxConsumeCPUDelay := 30 * time.Second
62-
waitForReplicasPollInterval := 20 * time.Second
63-
maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval
54+
var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (non-default behavior)", func() {
55+
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
56+
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
6457

6558
waitBuffer := 1 * time.Minute
6659

@@ -505,3 +498,61 @@ var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "
505498
})
506499
})
507500
})
501+
502+
var _ = SIGDescribe(feature.HPAConfigurableTolerance, framework.WithFeatureGate(features.HPAConfigurableTolerance),
503+
framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (configurable tolerance)", func() {
504+
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
505+
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
506+
507+
waitBuffer := 1 * time.Minute
508+
509+
ginkgo.Describe("with large configurable tolerance", func() {
510+
ginkgo.It("should not scale", func(ctx context.Context) {
511+
ginkgo.By("setting up resource consumer and HPA")
512+
initPods := 1
513+
initCPUUsageTotal := usageForReplicas(initPods)
514+
515+
rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
516+
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
517+
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
518+
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
519+
)
520+
ginkgo.DeferCleanup(rc.CleanUp)
521+
522+
scaleRule := e2eautoscaling.HPAScalingRuleWithToleranceMilli(10000)
523+
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
524+
rc, int32(targetCPUUtilizationPercent), 1, 10,
525+
e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleRule, scaleRule),
526+
)
527+
ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)
528+
529+
waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
530+
531+
ginkgo.By("trying to trigger scale up")
532+
rc.ConsumeCPU(usageForReplicas(8))
533+
waitStart := time.Now()
534+
535+
rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name)
536+
timeWaited := time.Since(waitStart)
537+
538+
ginkgo.By("verifying time waited for a scale up")
539+
framework.Logf("time waited for scale up: %s", timeWaited)
540+
gomega.Expect(timeWaited).To(gomega.BeNumerically(">", waitDeadline), "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)
541+
542+
ginkgo.By("verifying number of replicas")
543+
replicas, err := rc.GetReplicas(ctx)
544+
framework.ExpectNoError(err)
545+
gomega.Expect(replicas).To(gomega.BeNumerically("==", initPods), "had %s replicas, still have %s replicas after time deadline", initPods, replicas)
546+
})
547+
})
548+
})
549+
550+
// usageForReplicas returns usage for (n - 0.5) replicas as if they would consume all CPU
551+
// under the target. The 0.5 replica reduction is to accommodate for the deviation between
552+
// the actual consumed cpu and requested usage by the ResourceConsumer.
553+
// HPA rounds up the recommendations. So, if the usage is e.g. for 3.5 replicas,
554+
// the recommended replica number will be 4.
555+
func usageForReplicas(replicas int) int {
556+
usagePerReplica := podCPURequest * targetCPUUtilizationPercent / 100
557+
return replicas*usagePerReplica - usagePerReplica/2
558+
}

test/e2e/feature/feature.go

+4
Original file line numberDiff line numberDiff line change
@@ -219,6 +219,10 @@ var (
219219
// TODO: document the feature (owning SIG, when to use this feature for a test)
220220
HPA = framework.WithFeature(framework.ValidFeatures.Add("HPA"))
221221

222+
// OWNER: sig-autoscaling
223+
// Marks tests that require HPA configurable tolerance (https://kep.k8s.io/4951).
224+
HPAConfigurableTolerance = framework.WithFeature(framework.ValidFeatures.Add("HPAConfigurableTolerance"))
225+
222226
// owner: sig-node
223227
HostAccess = framework.WithFeature(framework.ValidFeatures.Add("HostAccess"))
224228

test/e2e/framework/autoscaling/autoscaling_utils.go

+7
Original file line numberDiff line numberDiff line change
@@ -880,6 +880,13 @@ func HPAScalingRuleWithScalingPolicy(policyType autoscalingv2.HPAScalingPolicyTy
880880
}
881881
}
882882

883+
func HPAScalingRuleWithToleranceMilli(toleranceMilli int64) *autoscalingv2.HPAScalingRules {
884+
quantity := resource.NewMilliQuantity(toleranceMilli, resource.DecimalSI)
885+
return &autoscalingv2.HPAScalingRules{
886+
Tolerance: quantity,
887+
}
888+
}
889+
883890
func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior {
884891
scaleUpRule := HPAScalingRuleWithStabilizationWindow(int32(upscaleStabilization.Seconds()))
885892
scaleDownRule := HPAScalingRuleWithStabilizationWindow(int32(downscaleStabilization.Seconds()))

0 commit comments

Comments
 (0)