21
21
from eventdata .schedulers .utilization_scheduler import UtilizationBasedScheduler
22
22
23
23
24
@pytest.fixture()
def reset_recorded_times():
    """Clear the scheduler's shared recorded response times before each test.

    The recorded times live in class-level state, so without this reset one
    test's recording phase would leak into the next test.
    """
    UtilizationBasedScheduler.reset_recorded_response_times()
28
+
24
29
class StaticPerfCounter:
    """Callable stand-in for ``time.perf_counter`` with a manually controlled clock.

    Tests advance time by assigning to the ``now`` attribute; calling the
    instance (with any arguments) returns that value unchanged.
    """

    def __init__(self, start):
        # the value every subsequent call will report as "current time"
        self.now = start

    def __call__(self, *args, **kwargs):
        return self.now
30
35
31
36
37
@pytest.mark.usefixtures("reset_recorded_times")
def test_invalid_target_utilization():
    """Out-of-range ``target-utilization`` values are rejected with ValueError."""
    # 200.432 is above the valid range, 0.0 is outside the half-open lower bound
    for invalid_utilization in (200.432, 0.0):
        with pytest.raises(ValueError) as exc_info:
            UtilizationBasedScheduler(params={
                "target-utilization": invalid_utilization,
                "record-response-times": False
            })
        expected = f"target-utilization must be in the range (0.0, 1.0] but is {invalid_utilization}"
        assert str(exc_info.value) == expected
48
54
49
55
56
@pytest.mark.usefixtures("reset_recorded_times")
def test_no_response_times_recorded():
    """Replaying (record-response-times False) without prior recordings is an error."""
    with pytest.raises(ValueError) as exc_info:
        UtilizationBasedScheduler(params={
            "target-utilization": 0.5,
            "record-response-times": False
        })

    assert str(exc_info.value) == "No response times recorded. Please run first with 'record-response-times'."
65
+
66
+
67
+ @pytest .mark .usefixtures ("reset_recorded_times" )
50
68
def test_valid_params ():
69
+ # simulate that response times have been recorded previously...
70
+ UtilizationBasedScheduler .RESPONSE_TIMES .append (1 )
71
+
51
72
s = UtilizationBasedScheduler (params = {
52
73
"target-utilization" : 0.0000001 ,
53
- "warmup-time-period " : 100
74
+ "record-response-times " : False
54
75
})
55
76
56
77
assert s is not None
@@ -63,60 +84,60 @@ def test_valid_params():
63
84
assert s is not None
64
85
65
86
87
@pytest.mark.usefixtures("reset_recorded_times")
def test_unthrottled_calculation():
    """With target-utilization 1.0 the scheduler never imposes a wait time."""
    clock = StaticPerfCounter(start=0)

    # first pass: record response times
    recorder = UtilizationBasedScheduler(params={
        "record-response-times": True
    }, perf_counter=clock)

    # simulate two requests that happen 10 seconds apart
    assert recorder.next(0) == 0
    clock.now = 10
    assert recorder.next(0) == 0

    # second pass: replay fully unthrottled
    replayer = UtilizationBasedScheduler(params={
        "target-utilization": 1.0,
        "record-response-times": False
    }, perf_counter=clock)

    # normal mode of operation (unthrottled): next request is always due immediately
    assert replayer.next(200) == 0
    assert replayer.next(300) == 0
87
108
88
109
110
@pytest.mark.usefixtures("reset_recorded_times")
def test_throttled_calculation():
    """Recorded response times and target-utilization determine the request interval."""
    clock = StaticPerfCounter(start=0)

    # first pass: record response times
    recorder = UtilizationBasedScheduler(params={
        "record-response-times": True
    }, perf_counter=clock)

    # recording phase: the simulated response time is always 20 seconds
    next_scheduled = 0
    for now in range(0, 100, 20):
        clock.now = now
        next_scheduled = recorder.next(next_scheduled)
        assert next_scheduled == 0

    # second pass: replay in throttled mode
    replayer = UtilizationBasedScheduler(params={
        "target-utilization": 0.1,
        "record-response-times": False
    }, perf_counter=clock)
    # 20 seconds * (1 / target utilization) = 20 seconds * (1 / 0.1) = 200 seconds
    assert replayer.time_between_requests == 200

    # normal mode of operation
    waiting_times = []
    next_scheduled = 0
    while next_scheduled < 1000000:
        next_request = replayer.next(next_scheduled)
        waiting_times.append(next_request - next_scheduled)
        # 20 seconds is our expected response time
        next_scheduled = next_request

    # mean waiting time should approach 200 seconds
    assert 190 <= statistics.mean(waiting_times) <= 210
0 commit comments