 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -40,29 +40,55 @@ public class InternalHistogramTests extends InternalMultiBucketAggregationTestCa

     private boolean keyed;
     private DocValueFormat format;
+    private int interval;
+    private int minDocCount;
+    private InternalHistogram.EmptyBucketInfo emptyBucketInfo;
+    private int offset;

     @Override
-    public void setUp() throws Exception {
+    public void setUp() throws Exception {
         super.setUp();
         keyed = randomBoolean();
         format = randomNumericDocValueFormat();
+        //in order for reduction to work properly (and be realistic) we need to use the same interval, minDocCount, emptyBucketInfo
+        //and offset in all randomly created aggs as part of the same test run. This is particularly important when minDocCount is
+        //set to 0 as empty buckets need to be added to fill the holes.
+        interval = randomIntBetween(1, 3);
+        offset = randomIntBetween(0, 3);
+        if (randomBoolean()) {
+            minDocCount = randomIntBetween(1, 10);
+            emptyBucketInfo = null;
+        } else {
+            minDocCount = 0;
+            //it's ok if minBound and maxBound are outside the range of the generated buckets, that will just mean that
+            //empty buckets won't be added before the first bucket and/or after the last one
+            int minBound = randomInt(50) - 30;
+            int maxBound = randomNumberOfBuckets() * interval + randomIntBetween(0, 10);
+            emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, InternalAggregations.EMPTY);
+        }
+    }
+
+    private double round(double key) {
+        return Math.floor((key - offset) / interval) * interval + offset;
     }

     @Override
     protected InternalHistogram createTestInstance(String name,
                                                    List<PipelineAggregator> pipelineAggregators,
                                                    Map<String, Object> metaData,
                                                    InternalAggregations aggregations) {
-        final int base = randomInt(50) - 30;
+        final double base = round(randomInt(50) - 30);
         final int numBuckets = randomNumberOfBuckets();
-        final int interval = randomIntBetween(1, 3);
         List<InternalHistogram.Bucket> buckets = new ArrayList<>();
         for (int i = 0; i < numBuckets; ++i) {
-            final int docCount = TestUtil.nextInt(random(), 1, 50);
-            buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, aggregations));
+            //rarely leave some holes to be filled up with empty buckets in case minDocCount is set to 0
+            if (frequently()) {
+                final int docCount = TestUtil.nextInt(random(), 1, 50);
+                buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, aggregations));
+            }
         }
         BucketOrder order = BucketOrder.key(randomBoolean());
-        return new InternalHistogram(name, buckets, order, 1, null, format, keyed, pipelineAggregators, metaData);
+        return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators, metaData);
     }

     // issue 26787
@@ -88,13 +114,36 @@ public void testHandlesNaN() {

     @Override
     protected void assertReduced(InternalHistogram reduced, List<InternalHistogram> inputs) {
-        Map<Double, Long> expectedCounts = new TreeMap<>();
+        TreeMap<Double, Long> expectedCounts = new TreeMap<>();
         for (Histogram histogram : inputs) {
             for (Histogram.Bucket bucket : histogram.getBuckets()) {
                 expectedCounts.compute((Double) bucket.getKey(),
                         (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
             }
         }
+        if (minDocCount == 0) {
+            double minBound = round(emptyBucketInfo.minBound);
+            if (expectedCounts.isEmpty() && emptyBucketInfo.minBound < emptyBucketInfo.maxBound) {
+                expectedCounts.put(minBound, 0L);
+            }
+            if (expectedCounts.isEmpty() == false) {
+                Double nextKey = expectedCounts.firstKey();
+                while (nextKey < expectedCounts.lastKey()) {
+                    expectedCounts.putIfAbsent(nextKey, 0L);
+                    nextKey += interval;
+                }
+                while (minBound < expectedCounts.firstKey()) {
+                    expectedCounts.put(expectedCounts.firstKey() - interval, 0L);
+                }
+                double maxBound = round(emptyBucketInfo.maxBound);
+                while (expectedCounts.lastKey() < maxBound) {
+                    expectedCounts.put(expectedCounts.lastKey() + interval, 0L);
+                }
+            }
+        } else {
+            expectedCounts.entrySet().removeIf(doubleLongEntry -> doubleLongEntry.getValue() < minDocCount);
+        }
+
         Map<Double, Long> actualCounts = new TreeMap<>();
         for (Histogram.Bucket bucket : reduced.getBuckets()) {
             actualCounts.compute((Double) bucket.getKey(),
@@ -121,6 +170,7 @@ protected InternalHistogram mutateInstance(InternalHistogram instance) {
         long minDocCount = instance.getMinDocCount();
         List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
         Map<String, Object> metaData = instance.getMetaData();
+        InternalHistogram.EmptyBucketInfo emptyBucketInfo = instance.emptyBucketInfo;
         switch (between(0, 4)) {
         case 0:
             name += randomAlphaOfLength(5);
@@ -135,6 +185,7 @@ protected InternalHistogram mutateInstance(InternalHistogram instance) {
             break;
         case 3:
             minDocCount += between(1, 10);
+            emptyBucketInfo = null;
             break;
         case 4:
             if (metaData == null) {
@@ -147,6 +198,6 @@ protected InternalHistogram mutateInstance(InternalHistogram instance) {
         default:
             throw new AssertionError("Illegal randomisation branch");
         }
-        return new InternalHistogram(name, buckets, order, minDocCount, null, format, keyed, pipelineAggregators, metaData);
+        return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators, metaData);
     }
 }
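The key piece of arithmetic in the diff is the new round(double key) helper: it snaps any raw value down to the start of the histogram bucket it falls into, on the grid offset + k * interval. That is why createTestInstance now rounds base and why assertReduced rounds minBound and maxBound: expected and actual bucket keys have to land on the same grid for the reduced counts to be comparable. The standalone sketch below is not part of the PR; the interval and offset values are arbitrary examples chosen only to show the formula in isolation.

public class BucketRoundingSketch {

    // Same formula as the test's round(double key), with interval and offset passed in
    // explicitly: snap a value down to the lower bound of its bucket on the grid
    // offset + k * interval.
    static double round(double key, int interval, int offset) {
        return Math.floor((key - offset) / interval) * interval + offset;
    }

    public static void main(String[] args) {
        int interval = 3;  // arbitrary example values, not taken from the PR
        int offset = 1;    // bucket boundaries: ..., -5, -2, 1, 4, 7, 10, ...
        System.out.println(round(7.5, interval, offset)); // 7.0  -> bucket [7, 10)
        System.out.println(round(-3, interval, offset));  // -5.0 -> bucket [-5, -2)
        System.out.println(round(1, interval, offset));   // 1.0  -> already on a boundary
    }
}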