@@ -22,5 +22,6 @@
 import org.elasticsearch.common.Rounding;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
@@ -40,22 +41,24 @@
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
 
 import static java.util.Collections.emptyList;
-import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
-import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
-import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 import static org.hamcrest.Matchers.equalTo;
 
 public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase<InternalAutoDateHistogram> {
 
     private DocValueFormat format;
     private RoundingInfo[] roundingInfos;
+    private long defaultStart;
+    private int roundingIndex;
 
     @Override
     public void setUp() throws Exception {
         super.setUp();
         format = randomNumericDocValueFormat();
+        defaultStart = randomLongBetween(0, DateFormatter.forPattern("date_optional_time").parseMillis("2050-01-01"));
+        roundingIndex = between(0, AutoDateHistogramAggregationBuilder.buildRoundings(null, null).length - 1);
     }
 
     @Override
@@ -68,17 +71,20 @@ protected InternalAutoDateHistogram createTestInstance(String name,
         int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
         List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
 
-        long startingDate = System.currentTimeMillis();
+        long startingDate = defaultStart;
+        if (rarely()) {
+            startingDate += randomFrom(TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS).toMillis(between(1, 10000));
+        }
 
         long interval = randomIntBetween(1, 3);
-        long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
+        long intervalMillis = roundingInfos[roundingIndex].roughEstimateDurationMillis * interval;
 
         for (int i = 0; i < nbBuckets; i++) {
             long key = startingDate + (intervalMillis * i);
             buckets.add(i, new InternalAutoDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations));
         }
         InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
-        BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);
+        BucketInfo bucketInfo = new BucketInfo(roundingInfos, roundingIndex, subAggregations);
         return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, emptyList(), metadata, 1);
     }
 
@@ -108,10 +114,6 @@ public void testGetAppropriateRoundingUsesCorrectIntervals() {
         assertThat(result, equalTo(2));
     }
 
-    public void testReduceRandom() {
-        super.testReduceRandom();
-    }
-
     @Override
     protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAutoDateHistogram> inputs) {
@@ -134,41 +136,49 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAut
         RoundingInfo roundingInfo = roundingInfos[roundingIndex];
 
         long normalizedDuration = (highest - lowest) / roundingInfo.getRoughEstimateDurationMillis();
-        long innerIntervalToUse = roundingInfo.innerIntervals[0];
         int innerIntervalIndex = 0;
 
-        // First, try to calculate the correct innerInterval using the normalizedDuration.
-        // This handles cases where highest and lowest are further apart than the interval being used.
+        /*
+         * Guess the interval to use based on the roughly estimated
+         * duration. It'll be accurate or it'll produce more buckets
+         * than we need but it is quick.
+         */
         if (normalizedDuration != 0) {
             for (int j = roundingInfo.innerIntervals.length - 1; j >= 0; j--) {
                 int interval = roundingInfo.innerIntervals[j];
                 if (normalizedDuration / interval < reduced.getBuckets().size()) {
-                    innerIntervalToUse = interval;
                     innerIntervalIndex = j;
                 }
             }
         }
 
-        long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
-        int bucketCount = getBucketCount(lowest, highest, roundingInfo, intervalInMillis);
-
-        // Next, if our bucketCount is still above what we need, we'll go back and determine the interval
-        // based on a size calculation.
-        if (bucketCount > reduced.getBuckets().size()) {
-            for (int i = innerIntervalIndex; i < roundingInfo.innerIntervals.length; i++) {
-                long newIntervalMillis = roundingInfo.innerIntervals[i] * roundingInfo.getRoughEstimateDurationMillis();
-                if (getBucketCount(lowest, highest, roundingInfo, newIntervalMillis) <= reduced.getBuckets().size()) {
-                    innerIntervalToUse = roundingInfo.innerIntervals[i];
-                    intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
-                }
+        /*
+         * Next pick smaller intervals until we find the one that makes the right
+         * number of buckets.
+         */
+        int innerIntervalToUse;
+        do {
+            innerIntervalToUse = roundingInfo.innerIntervals[innerIntervalIndex];
+            int bucketCount = getBucketCount(lowest, highest, roundingInfo.rounding, innerIntervalToUse);
+            if (bucketCount == reduced.getBuckets().size()) {
+                break;
             }
-        }
-
-        Map<Long, Long> expectedCounts = new TreeMap<>();
-        for (long keyForBucket = roundingInfo.rounding.round(lowest);
-            keyForBucket <= roundingInfo.rounding.round(highest);
-            keyForBucket = keyForBucket + intervalInMillis) {
-            expectedCounts.put(keyForBucket, 0L);
+            if (bucketCount < reduced.getBuckets().size()) {
+                innerIntervalToUse = roundingInfo.innerIntervals[Math.max(0, innerIntervalIndex - 1)];
+                break;
+            }
+        } while (++innerIntervalIndex < roundingInfo.innerIntervals.length);
+
+        assertThat(reduced.getInterval().toString(), equalTo(innerIntervalToUse + roundingInfo.unitAbbreviation));
+        Map<Instant, Long> expectedCounts = new TreeMap<>();
+        long keyForBucket = roundingInfo.rounding.round(lowest);
+        while (keyForBucket <= roundingInfo.rounding.round(highest)) {
+            long nextKey = keyForBucket;
+            for (int i = 0; i < innerIntervalToUse; i++) {
+                nextKey = roundingInfo.rounding.nextRoundingValue(nextKey);
+            }
+            Instant key = Instant.ofEpochMilli(keyForBucket);
+            expectedCounts.put(key, 0L);
 
             // Iterate through the input buckets, and for each bucket, determine if it's inside
             // the range of the bucket in the outer loop. if it is, add the doc count to the total
@@ -178,26 +188,26 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAut
                 for (Histogram.Bucket bucket : histogram.getBuckets()) {
                     long roundedBucketKey = roundingInfo.rounding.round(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli());
                     long docCount = bucket.getDocCount();
-                    if (roundedBucketKey >= keyForBucket
-                        && roundedBucketKey < keyForBucket + intervalInMillis) {
-                        expectedCounts.compute(keyForBucket,
-                            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
+                    if (roundedBucketKey >= keyForBucket && roundedBucketKey < nextKey) {
+                        expectedCounts.compute(key,
+                            (k, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
                     }
                 }
             }
+            keyForBucket = nextKey;
         }
 
         // If there is only a single bucket, and we haven't added it above, add a bucket with no documents.
         // this step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above.
         if (roundingInfo.rounding.round(lowest) == roundingInfo.rounding.round(highest) && expectedCounts.isEmpty()) {
-            expectedCounts.put(roundingInfo.rounding.round(lowest), 0L);
+            expectedCounts.put(Instant.ofEpochMilli(roundingInfo.rounding.round(lowest)), 0L);
         }
 
 
         // pick out the actual reduced values to the make the assertion more readable
-        Map<Long, Long> actualCounts = new TreeMap<>();
+        Map<Instant, Long> actualCounts = new TreeMap<>();
         for (Histogram.Bucket bucket : reduced.getBuckets()) {
-            actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(),
+            actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant(),
                 (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
         }
         assertEquals(expectedCounts, actualCounts);
@@ -211,11 +221,13 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAut
         assertThat(reduced.getInterval(), equalTo(expectedInterval));
     }
 
-    private int getBucketCount(long lowest, long highest, RoundingInfo roundingInfo, long intervalInMillis) {
+    private int getBucketCount(long min, long max, Rounding rounding, int interval) {
         int bucketCount = 0;
-        for (long keyForBucket = roundingInfo.rounding.round(lowest);
-            keyForBucket <= roundingInfo.rounding.round(highest);
-            keyForBucket = keyForBucket + intervalInMillis) {
+        long key = rounding.round(min);
+        while (key < max) {
+            for (int i = 0; i < interval; i++) {
+                key = rounding.nextRoundingValue(key);
+            }
             bucketCount++;
         }
         return bucketCount;
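The core of this change is that the test now steps bucket keys with `Rounding.nextRoundingValue` instead of adding a fixed `intervalInMillis`: calendar units vary in length, so fixed-width stepping drifts off bucket boundaries. Below is a minimal sketch of that difference. It assumes only the `org.elasticsearch.common.Rounding` calls the test itself makes (`round`, `nextRoundingValue`) plus the standard `Rounding.builder` factory from the same 7.x codebase; the demo class name and sample timestamps are illustrative, not part of the change.

```java
import java.time.ZoneOffset;
import org.elasticsearch.common.Rounding;

public class NextRoundingValueDemo {
    public static void main(String[] args) {
        // Month-of-year rounding, one of the calendar units auto_date_histogram can pick.
        Rounding rounding = Rounding.builder(Rounding.DateTimeUnit.MONTH_OF_YEAR)
            .timeZone(ZoneOffset.UTC)
            .build();

        long march = rounding.round(1583107200000L);    // 2020-03-02T00:00:00Z -> 2020-03-01T00:00:00Z
        long april = rounding.nextRoundingValue(march); // 2020-04-01T00:00:00Z, 31 days later
        long naive = march + 30L * 24 * 60 * 60 * 1000; // a fixed 30-day "month" lands mid-bucket

        System.out.println(april == 1585699200000L);    // true: next bucket starts on the month boundary
        System.out.println(naive == april);             // false: fixed-width stepping drifts
    }
}
```

The same reasoning explains the rewritten `getBucketCount` helper: counting buckets by repeatedly calling `nextRoundingValue` agrees with how the aggregation itself lays out buckets, whereas the old `keyForBucket + intervalInMillis` loop only matched rounding units whose duration never varies.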