import org.elasticsearch.common.Rounding;
import org.elasticsearch.common.io.stream.Writeable;
+ import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;

import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+ import java.util.concurrent.TimeUnit;

import static java.util.Collections.emptyList;
- import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
- import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
- import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.hamcrest.Matchers.equalTo;

public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase<InternalAutoDateHistogram> {

    private DocValueFormat format;
    private RoundingInfo[] roundingInfos;
+   private long defaultStart;
+   private int roundingIndex;

    @Override
    public void setUp() throws Exception {
        super.setUp();
        format = randomNumericDocValueFormat();
+       defaultStart = randomLongBetween(0, DateFormatter.forPattern("date_optional_time").parseMillis("2050-01-01"));
+       roundingIndex = between(0, AutoDateHistogramAggregationBuilder.buildRoundings(null, null).length - 1);
    }

    @Override
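Editor's note on the setUp() change above: capping the random start date at 2050-01-01 (instead of using System.currentTimeMillis()) keeps the generated data a pure function of the test seed, so failures reproduce. A JDK-only sketch of the idea, with java.util.Random and java.time standing in for the ES randomized-testing helpers; the class name and seed are illustrative, not from the PR:

import java.time.Instant;
import java.util.Random;

public class SeededStartSketch {
    public static void main(String[] args) {
        long seed = 42L; // stand-in for the test framework's per-run seed
        long bound = Instant.parse("2050-01-01T00:00:00Z").toEpochMilli();
        // Same seed -> same start date on every run, unlike System.currentTimeMillis(),
        // so a failing run can be replayed with identical data.
        long first = new Random(seed).longs(1, 0, bound).findFirst().getAsLong();
        long second = new Random(seed).longs(1, 0, bound).findFirst().getAsLong();
        System.out.println(first == second); // true
        System.out.println(Instant.ofEpochMilli(first));
    }
}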
@@ -68,17 +71,20 @@ protected InternalAutoDateHistogram createTestInstance(String name,
        int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
        List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);

-       long startingDate = System.currentTimeMillis();
+       long startingDate = defaultStart;
+       if (rarely()) {
+           startingDate += randomFrom(TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS).toMillis(between(1, 10000));
+       }

        long interval = randomIntBetween(1, 3);
-       long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
+       long intervalMillis = roundingInfos[roundingIndex].roughEstimateDurationMillis * interval;

        for (int i = 0; i < nbBuckets; i++) {
            long key = startingDate + (intervalMillis * i);
            buckets.add(i, new InternalAutoDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations));
        }
        InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
-       BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);
+       BucketInfo bucketInfo = new BucketInfo(roundingInfos, roundingIndex, subAggregations);
        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, emptyList(), metadata, 1);
    }
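Editor's note on createTestInstance(): bucket keys are now spaced by a multiple of the chosen rounding's rough duration, so the reduce phase sees data that actually fits the rounding under test, with an occasional jittered start. A minimal JDK-only sketch of that generation; the rough duration, start value, and probabilities are made-up stand-ins for the ES helpers:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;

public class BucketKeySketch {
    public static void main(String[] args) {
        Random random = new Random(42);
        long startingDate = 1_500_000_000_000L;     // stand-in for defaultStart
        if (random.nextInt(10) == 0) {              // stand-in for rarely(), fires occasionally
            TimeUnit[] units = {TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS};
            startingDate += units[random.nextInt(3)].toMillis(1 + random.nextInt(10_000));
        }
        long roughEstimateDurationMillis = 60_000L; // e.g. a minute-based rounding's estimate
        long intervalMillis = roughEstimateDurationMillis * (1 + random.nextInt(3));
        List<Long> keys = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            keys.add(startingDate + intervalMillis * i); // evenly spaced bucket keys
        }
        System.out.println(keys);
    }
}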
@@ -108,11 +114,6 @@ public void testGetAppropriateRoundingUsesCorrectIntervals() {
        assertThat(result, equalTo(2));
    }

-   @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/54540")
-   public void testReduceRandom() {
-       super.testReduceRandom();
-   }
-
    @Override
    protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAutoDateHistogram> inputs) {
@@ -135,41 +136,49 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAut
        RoundingInfo roundingInfo = roundingInfos[roundingIndex];

        long normalizedDuration = (highest - lowest) / roundingInfo.getRoughEstimateDurationMillis();
-       long innerIntervalToUse = roundingInfo.innerIntervals[0];
        int innerIntervalIndex = 0;

-       // First, try to calculate the correct innerInterval using the normalizedDuration.
-       // This handles cases where highest and lowest are further apart than the interval being used.
+       /*
+        * Guess the interval to use based on the roughly estimated
+        * duration. It'll be accurate or it'll produce more buckets
+        * than we need but it is quick.
+        */
        if (normalizedDuration != 0) {
            for (int j = roundingInfo.innerIntervals.length - 1; j >= 0; j--) {
                int interval = roundingInfo.innerIntervals[j];
                if (normalizedDuration / interval < reduced.getBuckets().size()) {
-                   innerIntervalToUse = interval;
                    innerIntervalIndex = j;
                }
            }
        }

-       long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
-       int bucketCount = getBucketCount(lowest, highest, roundingInfo, intervalInMillis);
-
-       // Next, if our bucketCount is still above what we need, we'll go back and determine the interval
-       // based on a size calculation.
-       if (bucketCount > reduced.getBuckets().size()) {
-           for (int i = innerIntervalIndex; i < roundingInfo.innerIntervals.length; i++) {
-               long newIntervalMillis = roundingInfo.innerIntervals[i] * roundingInfo.getRoughEstimateDurationMillis();
-               if (getBucketCount(lowest, highest, roundingInfo, newIntervalMillis) <= reduced.getBuckets().size()) {
-                   innerIntervalToUse = roundingInfo.innerIntervals[i];
-                   intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
-               }
+       /*
+        * Next pick smaller intervals until we find the one that makes the right
+        * number of buckets.
+        */
+       int innerIntervalToUse;
+       do {
+           innerIntervalToUse = roundingInfo.innerIntervals[innerIntervalIndex];
+           int bucketCount = getBucketCount(lowest, highest, roundingInfo.rounding, innerIntervalToUse);
+           if (bucketCount == reduced.getBuckets().size()) {
+               break;
            }
-       }
-
-       Map<Long, Long> expectedCounts = new TreeMap<>();
-       for (long keyForBucket = roundingInfo.rounding.round(lowest);
-               keyForBucket <= roundingInfo.rounding.round(highest);
-               keyForBucket = keyForBucket + intervalInMillis) {
-           expectedCounts.put(keyForBucket, 0L);
+           if (bucketCount < reduced.getBuckets().size()) {
+               innerIntervalToUse = roundingInfo.innerIntervals[Math.max(0, innerIntervalIndex - 1)];
+               break;
+           }
+       } while (++innerIntervalIndex < roundingInfo.innerIntervals.length);
+
+       assertThat(reduced.getInterval().toString(), equalTo(innerIntervalToUse + roundingInfo.unitAbbreviation));
+       Map<Instant, Long> expectedCounts = new TreeMap<>();
+       long keyForBucket = roundingInfo.rounding.round(lowest);
+       while (keyForBucket <= roundingInfo.rounding.round(highest)) {
+           long nextKey = keyForBucket;
+           for (int i = 0; i < innerIntervalToUse; i++) {
+               nextKey = roundingInfo.rounding.nextRoundingValue(nextKey);
+           }
+           Instant key = Instant.ofEpochMilli(keyForBucket);
+           expectedCounts.put(key, 0L);

            // Iterate through the input buckets, and for each bucket, determine if it's inside
            // the range of the bucket in the outer loop. if it is, add the doc count to the total
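Editor's note on the hunk above: the first pass only guesses an inner interval from the rough duration; the new do/while then walks the innerIntervals array until the bucket count matches the reduced result. A standalone sketch of the guess pass; the {1, 5, 10, 30} array is assumed to mirror the minute rounding's inner intervals, and the duration and target are made up:

public class GuessIntervalSketch {
    // Mirrors the "guess" loop above: scan from the coarsest inner interval down,
    // remembering the smallest interval that still yields fewer buckets than the
    // reduced aggregation actually has.
    static int guess(int[] innerIntervals, long normalizedDuration, int targetBuckets) {
        int index = 0;
        if (normalizedDuration != 0) {
            for (int j = innerIntervals.length - 1; j >= 0; j--) {
                if (normalizedDuration / innerIntervals[j] < targetBuckets) {
                    index = j;
                }
            }
        }
        return innerIntervals[index];
    }

    public static void main(String[] args) {
        int[] minuteIntervals = {1, 5, 10, 30};
        // 120 "minutes" of data, 10 reduced buckets: 30-minute buckets give 4 (< 10),
        // 10-minute buckets give 12 (>= 10), so the guess settles on 30.
        System.out.println(guess(minuteIntervals, 120, 10)); // prints 30
    }
}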
@@ -179,26 +188,26 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAut
            for (Histogram.Bucket bucket : histogram.getBuckets()) {
                long roundedBucketKey = roundingInfo.rounding.round(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli());
                long docCount = bucket.getDocCount();
-               if (roundedBucketKey >= keyForBucket
-                       && roundedBucketKey < keyForBucket + intervalInMillis) {
-                   expectedCounts.compute(keyForBucket,
-                       (key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
+               if (roundedBucketKey >= keyForBucket && roundedBucketKey < nextKey) {
+                   expectedCounts.compute(key,
+                       (k, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
                }
            }
        }
+       keyForBucket = nextKey;
    }

    // If there is only a single bucket, and we haven't added it above, add a bucket with no documents.
    // this step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above.
    if (roundingInfo.rounding.round(lowest) == roundingInfo.rounding.round(highest) && expectedCounts.isEmpty()) {
-       expectedCounts.put(roundingInfo.rounding.round(lowest), 0L);
+       expectedCounts.put(Instant.ofEpochMilli(roundingInfo.rounding.round(lowest)), 0L);
    }

    // pick out the actual reduced values to make the assertion more readable
-   Map<Long, Long> actualCounts = new TreeMap<>();
+   Map<Instant, Long> actualCounts = new TreeMap<>();
    for (Histogram.Bucket bucket : reduced.getBuckets()) {
-       actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(),
+       actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant(),
            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
    }
    assertEquals(expectedCounts, actualCounts);
@@ -212,11 +221,13 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAut
        assertThat(reduced.getInterval(), equalTo(expectedInterval));
    }

-   private int getBucketCount(long lowest, long highest, RoundingInfo roundingInfo, long intervalInMillis) {
+   private int getBucketCount(long min, long max, Rounding rounding, int interval) {
        int bucketCount = 0;
-       for (long keyForBucket = roundingInfo.rounding.round(lowest);
-               keyForBucket <= roundingInfo.rounding.round(highest);
-               keyForBucket = keyForBucket + intervalInMillis) {
+       long key = rounding.round(min);
+       while (key < max) {
+           for (int i = 0; i < interval; i++) {
+               key = rounding.nextRoundingValue(key);
+           }
            bucketCount++;
        }
        return bucketCount;
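Editor's note on getBucketCount(): stepping with Rounding.nextRoundingValue instead of adding a fixed intervalInMillis matters because calendar units are not fixed-length (DST shifts, variable month lengths), which is exactly what broke the old fixed-millis arithmetic. A JDK-only illustration; java.time stands in for ES's Rounding, and the zone and date are chosen to hit a DST transition:

import java.time.ZoneId;
import java.time.ZonedDateTime;

public class CalendarStepsSketch {
    public static void main(String[] args) {
        ZoneId paris = ZoneId.of("Europe/Paris");
        // Midnight before the spring-forward DST change (2020-03-29 in Europe/Paris).
        ZonedDateTime day = ZonedDateTime.of(2020, 3, 29, 0, 0, 0, 0, paris);
        ZonedDateTime nextByCalendar = day.plusDays(1); // the next "day" rounding value
        ZonedDateTime nextByMillis = day.plusHours(24); // a fixed 86,400,000 ms step
        // This calendar day is only 23 hours long, so the two disagree:
        System.out.println(nextByCalendar); // 2020-03-30T00:00+02:00
        System.out.println(nextByMillis);   // 2020-03-30T01:00+02:00
    }
}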