Increase InternalDateHistogramTests coverage #36064

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged · 1 commit · Nov 30, 2018
InternalDateHistogram.java

@@ -219,7 +219,7 @@ public int hashCode() {
     private final boolean keyed;
     private final long minDocCount;
     private final long offset;
-    private final EmptyBucketInfo emptyBucketInfo;
+    final EmptyBucketInfo emptyBucketInfo;
 
     InternalDateHistogram(String name, List<Bucket> buckets, BucketOrder order, long minDocCount, long offset,
                           EmptyBucketInfo emptyBucketInfo,
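
Note: the only production-code change in this PR drops the private modifier from emptyBucketInfo, making the field package-private so that the test class, which lives in the same org.elasticsearch.search.aggregations.bucket.histogram package, can read it directly (see instance.emptyBucketInfo in the mutateInstance hunk below).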
InternalDateHistogramTests.java

@@ -20,12 +20,14 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.joda.time.DateTime;
 
 import java.util.ArrayList;
@@ -42,12 +44,38 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
 
     private boolean keyed;
     private DocValueFormat format;
+    private long intervalMillis;
+    private long baseMillis;
+    private long minDocCount;
+    private InternalDateHistogram.EmptyBucketInfo emptyBucketInfo;
 
     @Override
     public void setUp() throws Exception {
         super.setUp();
         keyed = randomBoolean();
         format = randomNumericDocValueFormat();
+        // in order for reduction to work properly (and be realistic) we need to use the same interval, minDocCount,
+        // emptyBucketInfo and base in all randomly created aggs as part of the same test run. This is particularly
+        // important when minDocCount is set to 0 as empty buckets need to be added to fill the holes.
+        long interval = randomIntBetween(1, 3);
+        intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
+        Rounding rounding = Rounding.builder(TimeValue.timeValueMillis(intervalMillis)).build();
+        baseMillis = rounding.round(System.currentTimeMillis());
+        if (randomBoolean()) {
+            minDocCount = randomIntBetween(1, 10);
+            emptyBucketInfo = null;
+        } else {
+            minDocCount = 0;
+            ExtendedBounds extendedBounds = null;
+            if (randomBoolean()) {
+                // it's ok if min and max are outside the range of the generated buckets, that will just mean that
+                // empty buckets won't be added before the first bucket and/or after the last one
+                long min = baseMillis - intervalMillis * randomNumberOfBuckets();
+                long max = baseMillis + randomNumberOfBuckets() * intervalMillis + randomNumberOfBuckets();
+                extendedBounds = new ExtendedBounds(min, max);
+            }
+            emptyBucketInfo = new InternalDateHistogram.EmptyBucketInfo(rounding, InternalAggregations.EMPTY, extendedBounds);
+        }
     }
 
     @Override
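
As the comment in setUp explains, reduction only lines up when all random instances share one interval and one rounded base. A minimal sketch of that grid property, using plain Java arithmetic as a stand-in for the Elasticsearch Rounding API (the values are assumed, not from the PR):

    // Keys derived from a shared rounded base always land on the same grid,
    // so buckets from different random instances merge cleanly during reduce.
    long intervalMillis = 60_000L;                       // assumed one-minute interval
    long now = System.currentTimeMillis();
    long baseMillis = now - (now % intervalMillis);      // stand-in for rounding.round(now)
    long keyA = baseMillis - 3 * intervalMillis;         // instance A, 3 buckets before base
    long keyB = (baseMillis - 7 * intervalMillis) + 4 * intervalMillis; // instance B, different start
    assert keyA == keyB;                                 // identical key -> one merged bucket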
@@ -57,29 +85,58 @@ protected InternalDateHistogram createTestInstance(String name,
                                                     InternalAggregations aggregations) {
         int nbBuckets = randomNumberOfBuckets();
         List<InternalDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
-        long startingDate = System.currentTimeMillis();
-
-        long interval = randomIntBetween(1, 3);
-        long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
-
+        // avoid having different random instances start from exactly the same base
+        long startingDate = baseMillis - intervalMillis * randomIntBetween(0, 100);
         for (int i = 0; i < nbBuckets; i++) {
-            long key = startingDate + (intervalMillis * i);
-            buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+            // rarely leave some holes to be filled up with empty buckets in case minDocCount is set to 0
+            if (frequently()) {
+                long key = startingDate + intervalMillis * i;
+                buckets.add(new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+            }
         }
 
-        BucketOrder order = randomFrom(BucketOrder.key(true), BucketOrder.key(false));
-        return new InternalDateHistogram(name, buckets, order, 1, 0L, null, format, keyed, pipelineAggregators, metaData);
+        BucketOrder order = BucketOrder.key(randomBoolean());
+        return new InternalDateHistogram(name, buckets, order, minDocCount, 0L, emptyBucketInfo, format, keyed,
+            pipelineAggregators, metaData);
     }
 
     @Override
     protected void assertReduced(InternalDateHistogram reduced, List<InternalDateHistogram> inputs) {
-        Map<Long, Long> expectedCounts = new TreeMap<>();
+        TreeMap<Long, Long> expectedCounts = new TreeMap<>();
         for (Histogram histogram : inputs) {
             for (Histogram.Bucket bucket : histogram.getBuckets()) {
                 expectedCounts.compute(((DateTime) bucket.getKey()).getMillis(),
                     (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
             }
         }
+        if (minDocCount == 0) {
+            long minBound = -1;
+            long maxBound = -1;
+            if (emptyBucketInfo.bounds != null) {
+                minBound = emptyBucketInfo.rounding.round(emptyBucketInfo.bounds.getMin());
+                maxBound = emptyBucketInfo.rounding.round(emptyBucketInfo.bounds.getMax());
+                if (expectedCounts.isEmpty() && minBound <= maxBound) {
+                    expectedCounts.put(minBound, 0L);
+                }
+            }
+            if (expectedCounts.isEmpty() == false) {
+                Long nextKey = expectedCounts.firstKey();
+                while (nextKey < expectedCounts.lastKey()) {
+                    expectedCounts.putIfAbsent(nextKey, 0L);
+                    nextKey += intervalMillis;
+                }
+                if (emptyBucketInfo.bounds != null) {
+                    while (minBound < expectedCounts.firstKey()) {
+                        expectedCounts.put(expectedCounts.firstKey() - intervalMillis, 0L);
+                    }
+                    while (expectedCounts.lastKey() < maxBound) {
+                        expectedCounts.put(expectedCounts.lastKey() + intervalMillis, 0L);
+                    }
+                }
+            }
+        } else {
+            expectedCounts.entrySet().removeIf(doubleLongEntry -> doubleLongEntry.getValue() < minDocCount);
+        }
 
         Map<Long, Long> actualCounts = new TreeMap<>();
         for (Histogram.Bucket bucket : reduced.getBuckets()) {
             actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
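
The expected-counts bookkeeping above mirrors the reduce-phase hole filling: walk from the first key to the last in interval-sized steps, inserting zero-count buckets where frequently() left gaps. A self-contained sketch of that idea with assumed values (plain java.util, nothing Elasticsearch-specific):

    import java.util.TreeMap;

    public class FillHolesSketch {
        public static void main(String[] args) {
            long intervalMillis = 10;
            TreeMap<Long, Long> counts = new TreeMap<>();
            counts.put(0L, 5L);
            counts.put(30L, 2L);                 // holes at keys 10 and 20
            for (long key = counts.firstKey(); key < counts.lastKey(); key += intervalMillis) {
                counts.putIfAbsent(key, 0L);     // empty bucket where a hole was
            }
            System.out.println(counts);          // {0=5, 10=0, 20=0, 30=2}
        }
    }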
@@ -106,6 +163,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
         long minDocCount = instance.getMinDocCount();
         long offset = instance.getOffset();
         List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
+        InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = instance.emptyBucketInfo;
         Map<String, Object> metaData = instance.getMetaData();
         switch (between(0, 5)) {
         case 0:
@@ -121,6 +179,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
             break;
         case 3:
             minDocCount += between(1, 10);
+            emptyBucketInfo = null;
             break;
         case 4:
             offset += between(1, 20);
@@ -136,7 +195,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
         default:
             throw new AssertionError("Illegal randomisation branch");
         }
-        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, null, format, keyed, pipelineAggregators,
+        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators,
                 metaData);
     }
 }
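
When the minDocCount mutation bumps it above zero, emptyBucketInfo is nulled out to keep the mutated instance consistent, since empty buckets are only produced at minDocCount == 0; the rebuilt copy then passes emptyBucketInfo through instead of hard-coding null as before.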
InternalHistogramTests.java

@@ -123,7 +123,7 @@ protected void assertReduced(InternalHistogram reduced, List<InternalHistogram>
         }
         if (minDocCount == 0) {
             double minBound = round(emptyBucketInfo.minBound);
-            if (expectedCounts.isEmpty() && emptyBucketInfo.minBound < emptyBucketInfo.maxBound) {
+            if (expectedCounts.isEmpty() && emptyBucketInfo.minBound <= emptyBucketInfo.maxBound) {
                 expectedCounts.put(minBound, 0L);
             }
             if (expectedCounts.isEmpty() == false) {
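
This companion fix in the plain histogram test appears to handle bounds that collapse onto a single bucket: with the strict <, equal min and max bounds produced no expected bucket even though reduction still emits one empty bucket at round(minBound); <= accounts for it. The date-histogram version above applies the same comparison to the rounded bounds.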