@@ -20,12 +20,14 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.joda.time.DateTime;
 
 import java.util.ArrayList;
@@ -42,12 +44,38 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
 
     private boolean keyed;
     private DocValueFormat format;
+    private long intervalMillis;
+    private long baseMillis;
+    private long minDocCount;
+    private InternalDateHistogram.EmptyBucketInfo emptyBucketInfo;
 
     @Override
     public void setUp() throws Exception {
         super.setUp();
         keyed = randomBoolean();
         format = randomNumericDocValueFormat();
+        //in order for reduction to work properly (and be realistic) we need to use the same interval, minDocCount, emptyBucketInfo
+        //and base in all randomly created aggs as part of the same test run. This is particularly important when minDocCount is
+        //set to 0 as empty buckets need to be added to fill the holes.
+        long interval = randomIntBetween(1, 3);
+        intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
+        Rounding rounding = Rounding.builder(TimeValue.timeValueMillis(intervalMillis)).build();
+        baseMillis = rounding.round(System.currentTimeMillis());
+        if (randomBoolean()) {
+            minDocCount = randomIntBetween(1, 10);
+            emptyBucketInfo = null;
+        } else {
+            minDocCount = 0;
+            ExtendedBounds extendedBounds = null;
+            if (randomBoolean()) {
+                //it's ok if min and max are outside the range of the generated buckets, that will just mean that
+                //empty buckets won't be added before the first bucket and/or after the last one
+                long min = baseMillis - intervalMillis * randomNumberOfBuckets();
+                long max = baseMillis + randomNumberOfBuckets() * intervalMillis + randomNumberOfBuckets();
+                extendedBounds = new ExtendedBounds(min, max);
+            }
+            emptyBucketInfo = new InternalDateHistogram.EmptyBucketInfo(rounding, InternalAggregations.EMPTY, extendedBounds);
+        }
     }
 
     @Override
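A note on the rounded base above: `Rounding.round` snaps the randomly chosen start time onto an interval boundary, so every instance built in the same run produces keys on one shared grid, which reduction relies on when merging buckets and filling holes. Below is a minimal standalone sketch of that alignment; `FixedIntervalRounding` is a hypothetical stand-in, assuming a fixed millisecond interval and no time-zone handling, not the internal Elasticsearch `Rounding` class.

```java
// A standalone sketch of the alignment performed by
// Rounding.builder(TimeValue.timeValueMillis(intervalMillis)).build().round(...),
// assuming a fixed-width interval with no time-zone adjustment.
final class FixedIntervalRounding {
    private final long intervalMillis;

    FixedIntervalRounding(long intervalMillis) {
        this.intervalMillis = intervalMillis;
    }

    long round(long utcMillis) {
        // Math.floorMod keeps the result correct for pre-epoch timestamps too.
        return utcMillis - Math.floorMod(utcMillis, intervalMillis);
    }

    public static void main(String[] args) {
        FixedIntervalRounding rounding = new FixedIntervalRounding(60_000L); // one-minute buckets
        long base = rounding.round(System.currentTimeMillis());
        // Keys derived as base + i * intervalMillis stay on the boundary grid, which is
        // why instances sharing a rounded base can be merged bucket-by-bucket on reduce.
        System.out.println("aligned base = " + base + ", on boundary: " + (base % 60_000L == 0));
    }
}
```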
@@ -57,29 +85,58 @@ protected InternalDateHistogram createTestInstance(String name,
                                                        InternalAggregations aggregations) {
         int nbBuckets = randomNumberOfBuckets();
         List<InternalDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
-        long startingDate = System.currentTimeMillis();
-
-        long interval = randomIntBetween(1, 3);
-        long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
-
+        //avoid having different random instance start from exactly the same base
+        long startingDate = baseMillis - intervalMillis * randomIntBetween(0, 100);
         for (int i = 0; i < nbBuckets; i++) {
-            long key = startingDate + (intervalMillis * i);
-            buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+            //rarely leave some holes to be filled up with empty buckets in case minDocCount is set to 0
+            if (frequently()) {
+                long key = startingDate + intervalMillis * i;
+                buckets.add(new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+            }
         }
-
-        BucketOrder order = randomFrom(BucketOrder.key(true), BucketOrder.key(false));
-        return new InternalDateHistogram(name, buckets, order, 1, 0L, null, format, keyed, pipelineAggregators, metaData);
+        BucketOrder order = BucketOrder.key(randomBoolean());
+        return new InternalDateHistogram(name, buckets, order, minDocCount, 0L, emptyBucketInfo, format, keyed,
+                pipelineAggregators, metaData);
     }
 
     @Override
     protected void assertReduced(InternalDateHistogram reduced, List<InternalDateHistogram> inputs) {
-        Map<Long, Long> expectedCounts = new TreeMap<>();
+        TreeMap<Long, Long> expectedCounts = new TreeMap<>();
         for (Histogram histogram : inputs) {
             for (Histogram.Bucket bucket : histogram.getBuckets()) {
                 expectedCounts.compute(((DateTime) bucket.getKey()).getMillis(),
                         (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
             }
         }
+        if (minDocCount == 0) {
+            long minBound = -1;
+            long maxBound = -1;
+            if (emptyBucketInfo.bounds != null) {
+                minBound = emptyBucketInfo.rounding.round(emptyBucketInfo.bounds.getMin());
+                maxBound = emptyBucketInfo.rounding.round(emptyBucketInfo.bounds.getMax());
+                if (expectedCounts.isEmpty() && minBound <= maxBound) {
+                    expectedCounts.put(minBound, 0L);
+                }
+            }
+            if (expectedCounts.isEmpty() == false) {
+                Long nextKey = expectedCounts.firstKey();
+                while (nextKey < expectedCounts.lastKey()) {
+                    expectedCounts.putIfAbsent(nextKey, 0L);
+                    nextKey += intervalMillis;
+                }
+                if (emptyBucketInfo.bounds != null) {
+                    while (minBound < expectedCounts.firstKey()) {
+                        expectedCounts.put(expectedCounts.firstKey() - intervalMillis, 0L);
+                    }
+                    while (expectedCounts.lastKey() < maxBound) {
+                        expectedCounts.put(expectedCounts.lastKey() + intervalMillis, 0L);
+                    }
+                }
+            }
+        } else {
+            expectedCounts.entrySet().removeIf(doubleLongEntry -> doubleLongEntry.getValue() < minDocCount);
+        }
+
         Map<Long, Long> actualCounts = new TreeMap<>();
         for (Histogram.Bucket bucket : reduced.getBuckets()) {
             actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
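The expected-counts block above reimplements, on a `TreeMap`, what reduction is supposed to do when `minDocCount == 0`: step from the first to the last observed key in interval-sized strides, inserting zero-count buckets into the holes, then grow outwards to the rounded extended bounds when they are set. A self-contained sketch of just the hole-filling step (class and method names are illustrative, not taken from the production code):

```java
import java.util.TreeMap;

// Illustrative sketch: fill the gaps between the first and last observed bucket keys
// with zero-count buckets, the way the test derives its expected counts when
// minDocCount == 0. Assumes fixed-width buckets whose keys all sit on the same grid.
public class FillEmptyBucketsSketch {

    static void fillHoles(TreeMap<Long, Long> counts, long intervalMillis) {
        if (counts.isEmpty()) {
            return;
        }
        for (long key = counts.firstKey(); key < counts.lastKey(); key += intervalMillis) {
            counts.putIfAbsent(key, 0L); // only missing keys get a zero count
        }
    }

    public static void main(String[] args) {
        TreeMap<Long, Long> counts = new TreeMap<>();
        counts.put(0L, 5L);
        counts.put(3_000L, 2L); // with a 1s interval, keys 1000 and 2000 are holes
        fillHoles(counts, 1_000L);
        System.out.println(counts); // {0=5, 1000=0, 2000=0, 3000=2}
    }
}
```

With extended bounds set, the same stride is then applied outwards, from `firstKey()` down to the rounded minimum and from `lastKey()` up to the rounded maximum, which is what the two trailing `while` loops in the hunk above do.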
@@ -106,6 +163,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
         long minDocCount = instance.getMinDocCount();
         long offset = instance.getOffset();
         List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
+        InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = instance.emptyBucketInfo;
         Map<String, Object> metaData = instance.getMetaData();
         switch (between(0, 5)) {
         case 0:
@@ -121,6 +179,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
             break;
         case 3:
             minDocCount += between(1, 10);
+            emptyBucketInfo = null;
             break;
         case 4:
             offset += between(1, 20);
@@ -136,7 +195,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
         default:
             throw new AssertionError("Illegal randomisation branch");
         }
-        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, null, format, keyed, pipelineAggregators,
+        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators,
                 metaData);
     }
 }
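One detail worth noting in the mutation hunks: the `case 3` branch that raises `minDocCount` also nulls out `emptyBucketInfo`. That keeps what appears to be the intended invariant, namely that empty-bucket metadata only travels with an aggregation that can actually emit empty buckets (`minDocCount == 0`). A hypothetical assertion-style sketch of that invariant, under that assumption:

```java
// Hypothetical helper (not part of the production class) expressing the invariant
// the case-3 mutation keeps intact: empty-bucket metadata is only meaningful when
// minDocCount == 0, since that is the only configuration in which reduction
// materialises empty buckets. Run with -ea to enable the assertion.
final class EmptyBucketInfoInvariant {
    static void check(long minDocCount, Object emptyBucketInfo) {
        assert (minDocCount == 0) == (emptyBucketInfo != null)
                : "emptyBucketInfo should accompany minDocCount == 0 and nothing else";
    }

    public static void main(String[] args) {
        check(0, new Object()); // consistent: empty buckets will be materialised
        check(5, null);         // consistent: sparse buckets are dropped instead
    }
}
```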