diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
index 5732a872c8f58..529bdb84b12ac 100644
--- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
+++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java
@@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.RangeType;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 
 import java.util.Set;
@@ -71,6 +72,7 @@ public static class Builder implements IndexFieldData.Builder {
 
         private NumericType numericType;
         private Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION;
+        private RangeType rangeType;
 
         public Builder numericType(NumericType type) {
             this.numericType = type;
@@ -82,12 +84,17 @@ public Builder scriptFunction(Function<SortedSetDocValues, ScriptDocValues<?>> s
             return this;
         }
 
+        public Builder setRangeType(RangeType rangeType) {
+            this.rangeType = rangeType;
+            return this;
+        }
+
         @Override
         public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
                                        CircuitBreakerService breakerService, MapperService mapperService) {
             // Ignore Circuit Breaker
             final String fieldName = fieldType.name();
-            if (BINARY_INDEX_FIELD_NAMES.contains(fieldName)) {
+            if (BINARY_INDEX_FIELD_NAMES.contains(fieldName) || rangeType != null) {
                 assert numericType == null;
                 return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName);
             } else if (numericType != null) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
index e6a42e59353b0..c4fe6ca934f96 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
@@ -41,6 +41,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
 import org.elasticsearch.index.query.QueryShardContext;
 
 import java.io.IOException;
@@ -210,6 +212,8 @@ public static final class RangeFieldType extends MappedFieldType {
             }
         }
 
+        public RangeType rangeType() { return rangeType; }
+
         @Override
         public MappedFieldType clone() {
             return new RangeFieldType(this);
@@ -230,6 +234,12 @@ public int hashCode() {
             return Objects.hash(super.hashCode(), rangeType, dateTimeFormatter);
         }
 
+        @Override
+        public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
+            failIfNoDocValues();
+            return new DocValuesIndexFieldData.Builder().setRangeType(rangeType);
+        }
+
         @Override
         public String typeName() {
             return rangeType.name;
@@ -468,6 +478,14 @@ public String toString() {
            sb.append(includeTo ? ']' : ')');
            return sb.toString();
        }
+
+        public Object getFrom() {
+            return from;
+        }
+
+        public Object getTo() {
+            return to;
+        }
    }
 
    static class BinaryRangesDocValuesField extends CustomDocValuesField {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java
index ac3ec8f750603..256325eba5974 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java
@@ -108,6 +108,11 @@ public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
             throw new UnsupportedOperationException();
         }
 
+        @Override
+        public Double doubleValue(Object endpointValue) {
+            throw new UnsupportedOperationException("IP ranges cannot be safely converted to doubles");
+        }
+
         @Override
         public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
                                   boolean includeFrom, boolean includeTo) {
@@ -208,6 +213,11 @@ public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
             return LONG.decodeRanges(bytes);
         }
 
+        @Override
+        public Double doubleValue(Object endpointValue) {
+            return LONG.doubleValue(endpointValue);
+        }
+
         @Override
         public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
                                   boolean includeFrom, boolean includeTo) {
@@ -274,6 +284,12 @@ public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
             return BinaryRangeUtil.decodeFloatRanges(bytes);
         }
 
+        @Override
+        public Double doubleValue(Object endpointValue) {
+            assert endpointValue instanceof Float;
+            return ((Float) endpointValue).doubleValue();
+        }
+
         @Override
         public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
                                   boolean includeFrom, boolean includeTo) {
@@ -339,6 +355,12 @@ public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
             return BinaryRangeUtil.decodeDoubleRanges(bytes);
         }
 
+        @Override
+        public Double doubleValue(Object endpointValue) {
+            assert endpointValue instanceof Double;
+            return (Double) endpointValue;
+        }
+
         @Override
         public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
                                   boolean includeFrom, boolean includeTo) {
@@ -407,6 +429,11 @@ public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
             return LONG.decodeRanges(bytes);
         }
 
+        @Override
+        public Double doubleValue(Object endpointValue) {
+            return LONG.doubleValue(endpointValue);
+        }
+
         @Override
         public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
                                   boolean includeFrom, boolean includeTo) {
@@ -461,6 +488,12 @@ public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
             return BinaryRangeUtil.decodeLongRanges(bytes);
         }
 
+        @Override
+        public Double doubleValue(Object endpointValue) {
+            assert endpointValue instanceof Long;
+            return ((Long) endpointValue).doubleValue();
+        }
+
         @Override
         public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
                                   boolean includeFrom, boolean includeTo) {
@@ -621,6 +654,19 @@ public Query rangeQuery(String field, boolean hasDocValues, Object from, Object
 
     public abstract BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException;
     public abstract List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes);
 
+    /**
+     * Given the Range.to or Range.from Object value from a Range instance, converts that value into a Double. Before converting, it
+     * asserts that the object is of the expected type.
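+     * For example, {@code LONG.doubleValue(13L)} returns {@code 13.0}; {@code DATE} and {@code INTEGER} delegate to {@code LONG}.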
Operation is not supported on IP ranges (because of loss of precision) + * + * @param endpointValue Object value for Range.to or Range.from + * @return endpointValue as a Double + */ + public abstract Double doubleValue(Object endpointValue); + + public boolean isNumeric() { + return numberType != null; + } + public abstract Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index ecb65df433a69..f990bded291ae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -19,12 +19,14 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -32,9 +34,7 @@ import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder; -import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -48,9 +48,10 @@ import java.util.Objects; /** - * A builder for histograms on numeric fields. + * A builder for histograms on numeric fields. This builder can operate on either base numeric fields, or numeric range fields. IP range + * fields are unsupported, and will throw at the factory layer. 
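+ * A minimal usage sketch (the names are illustrative; the field would be mapped as a numeric range type such as
+ * {@code double_range}):
+ * <pre>{@code
+ * new HistogramAggregationBuilder("prices").field("price_range").interval(5).offset(0);
+ * }</pre>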
 */
-public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, HistogramAggregationBuilder>
+public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource, HistogramAggregationBuilder>
         implements MultiBucketAggregationBuilder {
     public static final String NAME = "histogram";
@@ -65,7 +66,7 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<
 
     private static final ObjectParser<HistogramAggregationBuilder, Void> PARSER;
     static {
         PARSER = new ObjectParser<>(HistogramAggregationBuilder.NAME);
-        ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false);
+        ValuesSourceParserHelper.declareAnyFields(PARSER, true, true);
 
         PARSER.declareDouble(HistogramAggregationBuilder::interval, Histogram.INTERVAL_FIELD);
@@ -95,9 +96,15 @@ public static HistogramAggregationBuilder parse(String aggregationName, XContent
 
     private boolean keyed = false;
     private long minDocCount = 0;
 
+    @Override
+    protected ValuesSourceType resolveScriptAny(Script script) {
+        // TODO: No idea how we'd support Range scripts here.
+        return ValuesSourceType.NUMERIC;
+    }
+
     /** Create a new builder with the given name. */
     public HistogramAggregationBuilder(String name) {
-        super(name, ValuesSourceType.NUMERIC, ValueType.DOUBLE);
+        super(name, ValuesSourceType.ANY, null);
     }
 
     protected HistogramAggregationBuilder(HistogramAggregationBuilder clone, Builder factoriesBuilder, Map<String, Object> metaData) {
@@ -118,7 +125,7 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map<String, O
 
     @Override
-    protected ValuesSourceAggregatorFactory<Numeric> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
+    protected ValuesSourceAggregatorFactory<ValuesSource> innerBuild(SearchContext context, ValuesSourceConfig<ValuesSource> config,
                                                                      AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
         return new HistogramAggregatorFactory(name, config, interval, offset, order, keyed, minDocCount, minBound, maxBound,
             context, parent, subFactoriesBuilder, metaData);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java
index b6828c8e84d1b..6fac7e514be9f 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java
@@ -22,10 +22,9 @@ import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.BucketOrder;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
-import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.internal.SearchContext;
@@ -34,7 +33,11 @@ import java.util.List;
 import java.util.Map;
 
-public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory<Numeric> {
+/**
+ * Constructs the per-shard aggregator instance for histogram aggregation. Selects the numeric or range field implementation based on the
+ * field type.
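+ * An {@code ip_range} field resolves to a range values source whose {@code RangeType.isNumeric()} is false, so it is
+ * rejected here with an {@code IllegalArgumentException}.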
+ */
+public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource> {
 
     private final double interval, offset;
     private final BucketOrder order;
@@ -42,10 +45,19 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
     private final long minDocCount;
     private final double minBound, maxBound;
 
-    public HistogramAggregatorFactory(String name, ValuesSourceConfig<Numeric> config, double interval, double offset,
-                                      BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
-                                      SearchContext context, AggregatorFactory<?> parent,
-                                      AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
+    @Override
+    protected ValuesSource resolveMissingAny(Object missing) {
+        if (missing instanceof Number) {
+            return ValuesSource.Numeric.EMPTY;
+        }
+        throw new IllegalArgumentException("Only numeric missing values are supported for histogram aggregation, found ["
+            + missing + "]");
+    }
+
+    public HistogramAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, double interval, double offset,
+                                      BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
+                                      SearchContext context, AggregatorFactory<?> parent,
+                                      AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
         super(name, config, context, parent, subFactoriesBuilder, metaData);
         this.interval = interval;
         this.offset = offset;
@@ -61,24 +73,34 @@ public long minDocCount() {
     }
 
     @Override
-    protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
+    protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
                                           List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
         if (collectsFromSingleBucket == false) {
             return asMultiBucketAggregator(this, context, parent);
         }
-        return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
-    }
-
-    private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
-                                        Map<String, Object> metaData) throws IOException {
-
-        return new HistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, valuesSource,
-            config.format(), context, parent, pipelineAggregators, metaData);
+        if (valuesSource instanceof ValuesSource.Numeric) {
+            return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
+                (ValuesSource.Numeric) valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
+        } else if (valuesSource instanceof ValuesSource.Range) {
+            ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource;
+            if (rangeValueSource.rangeType().isNumeric() == false) {
+                throw new IllegalArgumentException("Expected numeric range type but found non-numeric range ["
+                    + rangeValueSource.rangeType().name + "]");
+            }
+            return new RangeHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
+                (ValuesSource.Range) valuesSource, config.format(), context, parent, pipelineAggregators,
+                metaData);
+        } else {
+            throw new IllegalArgumentException("Expected one of [Numeric, Range] values source, found ["
+                + valuesSource.toString() + "]");
+        }
     }
 
     @Override
     protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
-        return createAggregator(null, parent, pipelineAggregators, metaData);
+        return new 
NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, + null, config.format(), context, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregator.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregator.java index 1295cec2e4b6d..b63cf94a98085 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregator.java @@ -52,7 +52,7 @@ * written as {@code interval * x + offset} and yet is less than or equal to * {@code value}. */ -class HistogramAggregator extends BucketsAggregator { +class NumericHistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; @@ -64,11 +64,11 @@ class HistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; - HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, - BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, - @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, - SearchContext context, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { + NumericHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, + SearchContext context, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); if (interval <= 0) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregator.java new file mode 100644 index 0000000000000..1a722dc951418 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregator.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class RangeHistogramAggregator extends BucketsAggregator { + private final ValuesSource.Range valuesSource; + private final DocValueFormat formatter; + private final double interval, offset; + private final BucketOrder order; + private final boolean keyed; + private final long minDocCount; + private final double minBound, maxBound; + + private final LongHash bucketOrds; + + RangeHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + @Nullable ValuesSource.Range valuesSource, DocValueFormat formatter, + SearchContext context, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + + super(name, factories, context, parent, pipelineAggregators, metaData); + if (interval <= 0) { + throw new IllegalArgumentException("interval must be positive, got: " + interval); + } + this.interval = interval; + this.offset = offset; + this.order = InternalOrder.validate(order, this); + this.keyed = keyed; + this.minDocCount = minDocCount; + this.minBound = minBound; + this.maxBound = maxBound; + this.valuesSource = valuesSource; + this.formatter = formatter; + + bucketOrds = new LongHash(1, context.bigArrays()); + } + + @Override + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); + final RangeType rangeType = valuesSource.rangeType(); + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long bucket) throws IOException { + assert bucket == 0; + if (values.advanceExact(doc)) { + // Is it possible for valuesCount to be > 1 here? Multiple ranges are encoded into the same BytesRef in the binary doc + // values, so it isn't clear what we'd be iterating over. 
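+                    // Worked example of the key loop below (illustrative numbers): with interval = 5 and offset = 0, a range
+                    // [1, 13] yields startKey = floor((1 - 0) / 5) = 0 and endKey = floor((13 - 0) / 5) = 2, so the document
+                    // is counted once each in the buckets keyed 0, 5 and 10. Ranges decode sorted by "from", so previousKey
+                    // lets a later range skip keys that an earlier range of this document already counted.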
+ final int valuesCount = values.docValueCount(); + assert valuesCount == 1 : "Value count for ranges should always be 1"; + double previousKey = Double.NEGATIVE_INFINITY; + + for (int i = 0; i < valuesCount; i++) { + BytesRef encodedRanges = values.nextValue(); + List ranges = rangeType.decodeRanges(encodedRanges); + double previousFrom = Double.NEGATIVE_INFINITY; + for (RangeFieldMapper.Range range : ranges) { + final Double from = rangeType.doubleValue(range.getFrom()); + // The encoding should ensure that this assert is always true. + assert from >= previousFrom : "Start of range not >= previous start"; + final Double to = rangeType.doubleValue(range.getTo()); + final double startKey = Math.floor((from - offset) / interval); + final double endKey = Math.floor((to - offset) / interval); + for (double key = startKey > previousKey ? startKey : previousKey; key <= endKey; key++) { + if (key == previousKey) { + continue; + } + // Bucket collection identical to NumericHistogramAggregator, could be refactored + long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key)); + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + } + } + if (endKey > previousKey) { + previousKey = endKey; + } + } + + } + } + } + }; + } + + // TODO: buildAggregation and buildEmptyAggregation are literally just copied out of NumericHistogramAggregator. We could refactor + // this to an abstract super class, if we wanted to. Might be overkill. + @Override + public InternalAggregation buildAggregation(long bucket) throws IOException { + assert bucket == 0; + consumeBucketsAndMaybeBreak((int) bucketOrds.size()); + List buckets = new ArrayList<>((int) bucketOrds.size()); + for (long i = 0; i < bucketOrds.size(); i++) { + double roundKey = Double.longBitsToDouble(bucketOrds.get(i)); + double key = roundKey * interval + offset; + buckets.add(new InternalHistogram.Bucket(key, bucketDocCount(i), keyed, formatter, bucketAggregations(i))); + } + + // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); + + InternalHistogram.EmptyBucketInfo emptyBucketInfo = null; + if (minDocCount == 0) { + emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations()); + } + return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(), + metaData()); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + InternalHistogram.EmptyBucketInfo emptyBucketInfo = null; + if (minDocCount == 0) { + emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations()); + } + return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, + pipelineAggregators(), metaData()); + } + + @Override + public void doClose() { + Releasables.close(bucketOrds); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index de729b619dcd4..c0fd5f26eb559 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.missing; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -78,7 +79,7 @@ protected void innerWriteTo(StreamOutput out) { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 75b32f8abe062..dab9cf34dbb7c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.significant; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -155,7 +156,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index 285869dd2e0cf..f22eaf4d28a59 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -94,7 +95,7 @@ public RareTermsAggregationBuilder(StreamInput in) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index a124feb115b19..7d5bda9ef1b81 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -140,7 +141,7 @@ public TermsAggregationBuilder(StreamInput in) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean 
serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 8d927e2fa59eb..0cc2b7d09c0c2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -96,7 +97,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java index ccf8ef8ba3dca..845fab414a3ac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; @@ -78,7 +79,7 @@ protected void innerWriteTo(StreamOutput out) { } @Override - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java index d7b56af2439e0..c61091fd2a12c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MissingValues.java @@ -49,6 +49,11 @@ public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOExc SortedBinaryDocValues values = valuesSource.bytesValues(context); return replaceMissing(values, missing); } + + @Override + public String toString() { + return "anon ValuesSource.Bytes of [" + super.toString() + "]"; + } }; } @@ -82,6 +87,10 @@ public BytesRef nextValue() throws IOException { return missing; } } + @Override + public String toString() { + return "anon SortedBinaryDocValues of [" + super.toString() + "]"; + } }; } @@ -111,6 +120,10 @@ public SortedNumericDoubleValues doubleValues(LeafReaderContext context) throws final SortedNumericDoubleValues values = valuesSource.doubleValues(context); return replaceMissing(values, missing.doubleValue()); } + @Override + public String toString() { + return "anon ValuesSource.Numeric of [" + super.toString() + "]"; + } }; } @@ -145,6 +158,11 @@ public boolean advanceExact(int doc) throws IOException { return true; } + @Override + public String toString() { + return "anon SortedNumericDocValues of [" + super.toString() + "]"; + } + }; } @@ -179,6 +197,11 @@ public int docValueCount() { return count 
== 0 ? 1 : count; } + @Override + public String toString() { + return "anon SortedNumericDoubleValues of [" + super.toString() + "]"; + } + }; } @@ -209,6 +232,12 @@ public LongUnaryOperator globalOrdinalsMapping(LeafReaderContext context) throws valuesSource.globalOrdinalsValues(context), valuesSource.globalOrdinalsMapping(context), missing); } + + @Override + public String toString() { + return "anon ValuesSource.Bytes.WithOrdinals of [" + super.toString() + "]"; + } + }; } @@ -263,6 +292,12 @@ public boolean advanceExact(int doc) throws IOException { // the document does not have a value return true; } + + @Override + public String toString() { + return "anon AbstractSortedDocValues of [" + super.toString() + "]"; + } + }; } @@ -316,6 +351,11 @@ public boolean advanceExact(int doc) throws IOException { // the document does not have a value return true; } + + @Override + public String toString() { + return "anon AbstractSortedDocValues of [" + super.toString() + "]"; + } }; } @@ -369,6 +409,11 @@ public MultiGeoPointValues geoPointValues(LeafReaderContext context) { final MultiGeoPointValues values = valuesSource.geoPointValues(context); return replaceMissing(values, missing); } + + @Override + public String toString() { + return "anon ValuesSource.GeoPoint of [" + super.toString() + "]"; + } }; } @@ -402,6 +447,11 @@ public GeoPoint nextValue() throws IOException { return missing; } } + + @Override + public String toString() { + return "anon MultiGeoPointValues of [" + super.toString() + "]"; + } }; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index fc23f72eddc9c..d130b385be89e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.fielddata.plain.BinaryDVIndexFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.DocValueFormat; @@ -48,7 +49,8 @@ public enum ValueType implements Writeable { // TODO: what is the difference between "number" and "numeric"? 
NUMERIC((byte) 7, "numeric", "numeric", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), GEOPOINT((byte) 8, "geo_point", "geo_point", ValuesSourceType.GEOPOINT, IndexGeoPointFieldData.class, DocValueFormat.GEOHASH), - BOOLEAN((byte) 9, "boolean", "boolean", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN); + BOOLEAN((byte) 9, "boolean", "boolean", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN), + RANGE((byte) 10, "range", "range", ValuesSourceType.RANGE, BinaryDVIndexFieldData.class, DocValueFormat.RAW); final String description; final ValuesSourceType valuesSourceType; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index 7fd38288a821b..19a607a0f177c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.SortingBinaryDocValues; import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.script.AggregationScript; import org.elasticsearch.search.aggregations.support.ValuesSource.WithScript.BytesValues; import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues; @@ -65,6 +66,28 @@ public boolean needsScores() { return false; } + public static class Range extends ValuesSource { + private final RangeType rangeType; + protected final IndexFieldData indexFieldData; + + public Range(IndexFieldData indexFieldData, RangeType rangeType) { + this.indexFieldData = indexFieldData; + this.rangeType = rangeType; + } + + @Override + public SortedBinaryDocValues bytesValues(LeafReaderContext context) { + return indexFieldData.load(context).getBytesValues(); + } + + @Override + public DocValueBits docsWithValue(LeafReaderContext context) throws IOException { + final SortedBinaryDocValues bytes = bytesValues(context); + return org.elasticsearch.index.fielddata.FieldData.docsWithValue(bytes); + } + + public RangeType rangeType() { return rangeType; } + } public abstract static class Bytes extends ValuesSource { @Override @@ -193,6 +216,7 @@ public FieldData(IndexFieldData indexFieldData) { public SortedBinaryDocValues bytesValues(LeafReaderContext context) { return indexFieldData.load(context).getBytesValues(); } + } public static class Script extends Bytes { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 11969b3f7dbeb..d1d72313688e4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -61,7 +62,7 @@ protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType, ValueType /** * Read an aggregation 
from a stream that serializes its targetValueType. This should only be used by subclasses that override - * {@link #serializeTargetValueType()} to return true. + * {@link #serializeTargetValueType(Version)} to return true. */ protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType) throws IOException { super(in, valuesSourceType); @@ -108,24 +109,31 @@ protected ValuesSourceAggregationBuilder(ValuesSourceAggregationBuilder } /** - * Read an aggregation from a stream that does not serialize its targetValueType. This should be used by most subclasses. + * Read an aggregation from a stream that has a sensible default for TargetValueType. This should be used by most subclasses. + * Subclasses needing to maintain backward compatibility to a version that did not serialize TargetValueType should use this + * constructor, providing the old, constant value for TargetValueType and override {@link #serializeTargetValueType(Version)} to return + * true only for versions that support the serialization. */ protected ValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType, ValueType targetValueType) throws IOException { super(in); - assert false == serializeTargetValueType() : "Wrong read constructor called for subclass that provides its targetValueType"; this.valuesSourceType = valuesSourceType; - this.targetValueType = targetValueType; + if (serializeTargetValueType(in.getVersion())) { + this.targetValueType = in.readOptionalWriteable(ValueType::readFromStream); + } else { + this.targetValueType = targetValueType; + } read(in); } /** * Read an aggregation from a stream that serializes its targetValueType. This should only be used by subclasses that override - * {@link #serializeTargetValueType()} to return true. + * {@link #serializeTargetValueType(Version)} to return true. */ protected ValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType) throws IOException { super(in); - assert serializeTargetValueType() : "Wrong read constructor called for subclass that serializes its targetValueType"; + // TODO: Can we get rid of this constructor and always use the three value version? Does this assert provide any value? + assert serializeTargetValueType(in.getVersion()) : "Wrong read constructor called for subclass that serializes its targetValueType"; this.valuesSourceType = valuesSourceType; this.targetValueType = in.readOptionalWriteable(ValueType::readFromStream); read(in); @@ -149,7 +157,7 @@ private void read(StreamInput in) throws IOException { @Override protected final void doWriteTo(StreamOutput out) throws IOException { - if (serializeTargetValueType()) { + if (serializeTargetValueType(out.getVersion())) { out.writeOptionalWriteable(targetValueType); } out.writeOptionalString(field); @@ -177,8 +185,9 @@ protected final void doWriteTo(StreamOutput out) throws IOException { /** * Should this builder serialize its targetValueType? Defaults to false. All subclasses that override this to true should use the three * argument read constructor rather than the four argument version. + * @param version For backwards compatibility, subclasses can change behavior based on the version */ - protected boolean serializeTargetValueType() { + protected boolean serializeTargetValueType(Version version) { return false; } @@ -306,10 +315,21 @@ protected final ValuesSourceAggregatorFactory doBuild(SearchContext context, return factory; } + /** + * Provide a hook for aggregations to have finer grained control of the ValuesSourceType for script values. 
This will only be called if + * the user did not supply a type hint for the script. The script object is provided for reference. + * + * @param script - The user supplied script + * @return The ValuesSourceType we expect this script to yield. + */ + protected ValuesSourceType resolveScriptAny(Script script) { + return ValuesSourceType.BYTES; + } + protected ValuesSourceConfig resolveConfig(SearchContext context) { ValueType valueType = this.valueType != null ? this.valueType : targetValueType; return ValuesSourceConfig.resolve(context.getQueryShardContext(), - valueType, field, script, missing, timeZone, format); + valueType, field, script, missing, timeZone, format, this::resolveScriptAny); } protected abstract ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 919d1b752e22c..a9e59519dcf6c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -28,6 +28,7 @@ import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.AggregationScript; import org.elasticsearch.script.Script; @@ -48,12 +49,25 @@ public class ValuesSourceConfig { * Resolve a {@link ValuesSourceConfig} given configuration parameters. */ public static ValuesSourceConfig resolve( - QueryShardContext context, - ValueType valueType, - String field, Script script, - Object missing, - ZoneId timeZone, - String format) { + QueryShardContext context, + ValueType valueType, + String field, Script script, + Object missing, + ZoneId timeZone, + String format) { + return resolve(context, valueType, field, script, missing, timeZone, format, s -> ValuesSourceType.BYTES); + } + + /** + * Resolve a {@link ValuesSourceConfig} given configuration parameters. 
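+     * For example, the histogram aggregation passes a resolver that treats any untyped script as numeric, roughly:
+     * <pre>{@code
+     * ValuesSourceConfig.resolve(context, valueType, field, script, missing, timeZone, format, s -> ValuesSourceType.NUMERIC);
+     * }</pre>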
+ */ + public static ValuesSourceConfig resolve( + QueryShardContext context, + ValueType valueType, + String field, Script script, + Object missing, + ZoneId timeZone, + String format, Function resolveScriptAny) { if (field == null) { if (script == null) { @@ -67,7 +81,7 @@ public static ValuesSourceConfig resolve( // we need to have a specific value source // type to know how to handle the script values, so we fallback // on Bytes - valuesSourceType = ValuesSourceType.BYTES; + valuesSourceType = resolveScriptAny.apply(script); } ValuesSourceConfig config = new ValuesSourceConfig<>(valuesSourceType); config.missing(missing); @@ -101,9 +115,12 @@ public static ValuesSourceConfig resolve( config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); } else if (indexFieldData instanceof IndexGeoPointFieldData) { config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT); + } else if (fieldType instanceof RangeFieldMapper.RangeFieldType) { + config = new ValuesSourceConfig<>(ValuesSourceType.RANGE); } else { config = new ValuesSourceConfig<>(ValuesSourceType.BYTES); } + } else { config = new ValuesSourceConfig<>(valueType.getValuesSourceType()); } @@ -303,6 +320,9 @@ private VS originalValuesSource() { if (valueSourceType() == ValuesSourceType.GEOPOINT) { return (VS) geoPointField(); } + if (valueSourceType() == ValuesSourceType.RANGE) { + return (VS) rangeField(); + } // falling back to bytes values return (VS) bytesField(); } @@ -352,4 +372,14 @@ private ValuesSource.GeoPoint geoPointField() { return new ValuesSource.GeoPoint.Fielddata((IndexGeoPointFieldData) fieldContext().indexFieldData()); } + + private ValuesSource rangeField() { + MappedFieldType fieldType = fieldContext.fieldType(); + + if (fieldType instanceof RangeFieldMapper.RangeFieldType == false) { + throw new IllegalStateException("Asked for range ValuesSource, but field is of type " + fieldType.name()); + } + RangeFieldMapper.RangeFieldType rangeFieldType = (RangeFieldMapper.RangeFieldType)fieldType; + return new ValuesSource.Range(fieldContext().indexFieldData(), rangeFieldType.rangeType()); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java index a4da3e3e3c320..93398abe99e9a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceType.java @@ -30,7 +30,8 @@ public enum ValuesSourceType implements Writeable { ANY, NUMERIC, BYTES, - GEOPOINT; + GEOPOINT, + RANGE; public static ValuesSourceType fromString(String name) { return valueOf(name.trim().toUpperCase(Locale.ROOT)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java similarity index 89% rename from server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java index 624870f6e47a6..e3d1b931c71d5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java @@ -34,10 +34,9 @@ import 
org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; - import static org.hamcrest.Matchers.containsString; -public class HistogramAggregatorTests extends AggregatorTestCase { +public class NumericHistogramAggregatorTests extends AggregatorTestCase { public void testLongs() throws Exception { try (Directory dir = newDirectory(); @@ -300,6 +299,44 @@ public void testOffset() throws Exception { } } + public void testRandomOffset() throws Exception { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + // Note, these values are carefully chosen to ensure that no matter what offset we pick, no two can end up in the same bucket + for (double value : new double[] {9.3, 3.2, -5}) { + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("field", NumericUtils.doubleToSortableLong(value))); + w.addDocument(doc); + } + + final double offset = randomDouble(); + final double interval = 5; + final double expectedOffset = offset % interval; + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(interval) + .offset(offset); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + fieldType.setName("field"); + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(3, histogram.getBuckets().size()); + + assertEquals(-10 + expectedOffset, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(expectedOffset, histogram.getBuckets().get(1).getKey()); + assertEquals(1, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5 + expectedOffset, histogram.getBuckets().get(2).getKey()); + assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } + } + } + public void testExtendedBounds() throws Exception { try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java new file mode 100644 index 0000000000000..73dd41e640dda --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java @@ -0,0 +1,445 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.junit.Rule; +import org.junit.rules.ExpectedException; + +import java.util.Collections; +import java.util.Set; + +public class RangeHistogramAggregatorTests extends AggregatorTestCase { + + @Rule + public final ExpectedException expectedException = ExpectedException.none(); + + public void testDoubles() throws Exception { + RangeType rangeType = RangeType.DOUBLE; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d, histogram.getBuckets().get(1).getKey()); + assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(2).getKey()); + assertEquals(2, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + } + } + } + + public void testLongs() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] { + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0 5 + new 
RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true), // bucket 40, 45 + }) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range)); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + } + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d, histogram.getBuckets().get(1).getKey()); + assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(2).getKey()); + assertEquals(2, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d, histogram.getBuckets().get(5).getKey()); + assertEquals(1, histogram.getBuckets().get(5).getDocCount()); + } + } + } + + public void testMultipleRanges() throws Exception { + RangeType rangeType = RangeType.LONG; + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + Document doc = new Document(); + BytesRef encodedRange = rangeType.encodeRanges(Set.of( + new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0 5 + new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -5, 0 + new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket 0, 5, 10 + new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true) // bucket 40, 45 + )); + doc.add(new BinaryDocValuesField("field", encodedRange)); + w.addDocument(doc); + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg") + .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertEquals(6, histogram.getBuckets().size()); + + assertEquals(-5d, histogram.getBuckets().get(0).getKey()); + assertEquals(1, histogram.getBuckets().get(0).getDocCount()); + + assertEquals(0d, histogram.getBuckets().get(1).getKey()); + assertEquals(1, histogram.getBuckets().get(1).getDocCount()); + + assertEquals(5d, histogram.getBuckets().get(2).getKey()); + assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + + assertEquals(10d, histogram.getBuckets().get(3).getKey()); + assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + + assertEquals(40d, histogram.getBuckets().get(4).getKey()); + assertEquals(1, histogram.getBuckets().get(4).getDocCount()); + + assertEquals(45d, 
+
+    public void testMultipleRangesLotsOfOverlap() throws Exception {
+        RangeType rangeType = RangeType.LONG;
+        try (Directory dir = newDirectory();
+             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            Document doc = new Document();
+            BytesRef encodedRange = rangeType.encodeRanges(Set.of(
+                new RangeFieldMapper.Range(rangeType, 1L, 2L, true, true), // bucket 0
+                new RangeFieldMapper.Range(rangeType, 1L, 4L, true, true), // bucket 0
+                new RangeFieldMapper.Range(rangeType, 1L, 13L, true, true), // bucket 0, 5, 10
+                new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true) // bucket 0, 5
+            ));
+            doc.add(new BinaryDocValuesField("field", encodedRange));
+            w.addDocument(doc);
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
+                .field("field")
+                .interval(5);
+            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
+            fieldType.setName("field");
+
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertEquals(3, histogram.getBuckets().size());
+
+                assertEquals(0d, histogram.getBuckets().get(0).getKey());
+                assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(5d, histogram.getBuckets().get(1).getKey());
+                assertEquals(1, histogram.getBuckets().get(1).getDocCount());
+
+                assertEquals(10d, histogram.getBuckets().get(2).getKey());
+                assertEquals(1, histogram.getBuckets().get(2).getDocCount());
+            }
+        }
+    }
+
+    public void testLongsIrrationalInterval() throws Exception {
+        RangeType rangeType = RangeType.LONG;
+        try (Directory dir = newDirectory();
+             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
+                new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0, pi
+                new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -pi, 0, pi
+                new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket pi, 2pi, 3pi, 4pi
+            }) {
+                Document doc = new Document();
+                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
+                doc.add(new BinaryDocValuesField("field", encodedRange));
+                w.addDocument(doc);
+            }
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
+                .field("field")
+                .interval(Math.PI);
+            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
+            fieldType.setName("field");
+
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertEquals(6, histogram.getBuckets().size());
+
+                assertEquals(-1 * Math.PI, histogram.getBuckets().get(0).getKey());
+                assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(0 * Math.PI, histogram.getBuckets().get(1).getKey());
+                assertEquals(2, histogram.getBuckets().get(1).getDocCount());
+
+                assertEquals(1 * Math.PI, histogram.getBuckets().get(2).getKey());
+                assertEquals(3, histogram.getBuckets().get(2).getDocCount());
+
+                assertEquals(2 * Math.PI, histogram.getBuckets().get(3).getKey());
+                assertEquals(1, histogram.getBuckets().get(3).getDocCount());
+
+                assertEquals(3 * Math.PI, histogram.getBuckets().get(4).getKey());
+                assertEquals(1, histogram.getBuckets().get(4).getDocCount());
+
+                assertEquals(4 * Math.PI, histogram.getBuckets().get(5).getKey());
+                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
+            }
+        }
+    }
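The exact-equality assertions against -1 * Math.PI, 2 * Math.PI, and so on only hold because each bucket key is computed as an exact double product of the bucket ordinal and the interval. A quick sanity check of that arithmetic, assuming the same floor-based rounding as in the sketch above:

    double interval = Math.PI;
    // The long endpoint 4 falls in bucket ordinal Math.floor(4 / pi) = 1,
    // whose key is the exact double product 1 * Math.PI.
    double key = Math.floor(4.0 / interval) * interval;
    assert key == 1 * Math.PI;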
+
+    public void testMinDocCount() throws Exception {
+        RangeType rangeType = RangeType.LONG;
+        try (Directory dir = newDirectory();
+             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
+                new RangeFieldMapper.Range(rangeType, -14L, -11L, true, true), // bucket -15
+                new RangeFieldMapper.Range(rangeType, 0L, 9L, true, true), // bucket 0, 5
+                new RangeFieldMapper.Range(rangeType, 6L, 12L, true, true), // bucket 5, 10
+                new RangeFieldMapper.Range(rangeType, 13L, 14L, true, true), // bucket 10
+            }) {
+                Document doc = new Document();
+                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
+                doc.add(new BinaryDocValuesField("field", encodedRange));
+                w.addDocument(doc);
+            }
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
+                .field("field")
+                .interval(5)
+                .minDocCount(2);
+            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
+            fieldType.setName("field");
+
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertEquals(2, histogram.getBuckets().size());
+
+                assertEquals(5d, histogram.getBuckets().get(0).getKey());
+                assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(10d, histogram.getBuckets().get(1).getKey());
+                assertEquals(2, histogram.getBuckets().get(1).getDocCount());
+            }
+        }
+    }
+
+    public void testOffset() throws Exception {
+        RangeType rangeType = RangeType.DOUBLE;
+        try (Directory dir = newDirectory();
+             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
+                new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket -1, 4
+                new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -6, -1, 4
+                new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 4, 9
+                new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 39, 44, 49
+            }) {
+                Document doc = new Document();
+                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
+                doc.add(new BinaryDocValuesField("field", encodedRange));
+                w.addDocument(doc);
+            }
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
+                .field("field")
+                .interval(5)
+                .offset(4);
+            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
+            fieldType.setName("field");
+
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertEquals(7, histogram.getBuckets().size());
+
+                assertEquals(-6d, histogram.getBuckets().get(0).getKey());
+                assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(-1d, histogram.getBuckets().get(1).getKey());
+                assertEquals(2, histogram.getBuckets().get(1).getDocCount());
+
+                assertEquals(4d, histogram.getBuckets().get(2).getKey());
+                assertEquals(3, histogram.getBuckets().get(2).getDocCount());
+
+                assertEquals(9d, histogram.getBuckets().get(3).getKey());
+                assertEquals(1, histogram.getBuckets().get(3).getDocCount());
+
+                assertEquals(39d, histogram.getBuckets().get(4).getKey());
+                assertEquals(1, histogram.getBuckets().get(4).getDocCount());
+
+                assertEquals(44d, histogram.getBuckets().get(5).getKey());
+                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
+
+                assertEquals(49d, histogram.getBuckets().get(6).getKey());
+                assertEquals(1, histogram.getBuckets().get(6).getDocCount());
+            }
+        }
+    }
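The offset shifts every bucket boundary, which is how the range [1.0, 5.0] ends up spanning the buckets keyed -1 and 4 rather than 0 and 5. A small check of that expectation, again assuming the floor-based rounding from the earlier sketch:

    double interval = 5, offset = 4;
    // from = 1.0: Math.floor((1 - 4) / 5) = -1, so the first key is -1 * 5 + 4 = -1
    double firstKey = Math.floor((1.0 - offset) / interval) * interval + offset;
    // to = 5.0: Math.floor((5 - 4) / 5) = 0, so the last key is 0 * 5 + 4 = 4
    double lastKey = Math.floor((5.0 - offset) / interval) * interval + offset;
    assert firstKey == -1d && lastKey == 4d; // matches the "// bucket -1, 4" comment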
+
+    public void testOffsetGtInterval() throws Exception {
+        RangeType rangeType = RangeType.DOUBLE;
+        try (Directory dir = newDirectory();
+             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
+                new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0, 5
+                new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0
+                new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10
+                new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45
+            }) {
+                Document doc = new Document();
+                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
+                doc.add(new BinaryDocValuesField("field", encodedRange));
+                w.addDocument(doc);
+            }
+
+            // It would be nice to randomize the offset here, as the numeric histogram test does, but there is no obvious way to
+            // construct the intervals so that the ranges would not "slosh" between buckets as the offset changes.
+            final double offset = 20;
+            final double interval = 5;
+            final double expectedOffset = offset % interval;
+
+            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
+                .field("field")
+                .interval(interval)
+                .offset(offset);
+            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
+            fieldType.setName("field");
+
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertEquals(6, histogram.getBuckets().size());
+
+                assertEquals(-5d + expectedOffset, histogram.getBuckets().get(0).getKey());
+                assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+
+                assertEquals(0d + expectedOffset, histogram.getBuckets().get(1).getKey());
+                assertEquals(3, histogram.getBuckets().get(1).getDocCount());
+
+                assertEquals(5d + expectedOffset, histogram.getBuckets().get(2).getKey());
+                assertEquals(2, histogram.getBuckets().get(2).getDocCount());
+
+                assertEquals(10d + expectedOffset, histogram.getBuckets().get(3).getKey());
+                assertEquals(1, histogram.getBuckets().get(3).getDocCount());
+
+                assertEquals(40d + expectedOffset, histogram.getBuckets().get(4).getKey());
+                assertEquals(1, histogram.getBuckets().get(4).getDocCount());
+
+                assertEquals(45d + expectedOffset, histogram.getBuckets().get(5).getKey());
+                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
+            }
+        }
+    }
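With offset = 20 and interval = 5 the expected offset reduces to 20 % 5 = 0, so the assertions above collapse to the same keys as in testDoubles. That is not a special-cased normalization: shifting the bucket grid by a whole number of intervals maps the grid onto itself, so any offset is equivalent to offset % interval under the floor-based rounding. A quick check:

    double interval = 5, offset = 20; // a whole number of intervals
    // Shifting the grid by a multiple of the interval leaves the keys unchanged,
    // so offset 20 with interval 5 behaves exactly like offset 20 % 5 = 0.
    double keyWithOffset = Math.floor((1.0 - offset) / interval) * interval + offset;
    double keyNoOffset = Math.floor(1.0 / interval) * interval;
    assert keyWithOffset == keyNoOffset; // both 0.0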
+ .field("field") + .interval(5); + MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType(); + fieldType.setName("field"); + + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + expectedException.expect(IllegalArgumentException.class); + search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + } + } + + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java index fd3b930e407d5..e8748417480ea 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java @@ -41,6 +41,8 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; @@ -215,6 +217,8 @@ private static MappedFieldType createMappedFieldType(ValueType valueType) { return new IpFieldMapper.Builder("_name").fieldType(); case GEOPOINT: return new GeoPointFieldMapper.Builder("_name").fieldType(); + case RANGE: + return new RangeFieldMapper.Builder("_name", RangeType.DOUBLE).fieldType(); default: throw new IllegalArgumentException("Test does not support value type [" + valueType + "]"); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java index d2f73aab3aaa3..42c276e0c4efb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceTypeTests.java @@ -37,6 +37,7 @@ public void testValidOrdinals() { assertThat(ValuesSourceType.NUMERIC.ordinal(), equalTo(1)); assertThat(ValuesSourceType.BYTES.ordinal(), equalTo(2)); assertThat(ValuesSourceType.GEOPOINT.ordinal(), equalTo(3)); + assertThat(ValuesSourceType.RANGE.ordinal(), equalTo(4)); } @Override @@ -45,6 +46,7 @@ public void testFromString() { assertThat(ValuesSourceType.fromString("numeric"), equalTo(ValuesSourceType.NUMERIC)); assertThat(ValuesSourceType.fromString("bytes"), equalTo(ValuesSourceType.BYTES)); assertThat(ValuesSourceType.fromString("geopoint"), equalTo(ValuesSourceType.GEOPOINT)); + assertThat(ValuesSourceType.fromString("range"), equalTo(ValuesSourceType.RANGE)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ValuesSourceType.fromString("does_not_exist")); assertThat(e.getMessage(), equalTo("No enum constant org.elasticsearch.search.aggregations.support.ValuesSourceType.DOES_NOT_EXIST")); @@ -57,6 +59,7 @@ public void testReadFrom() throws IOException { assertReadFromStream(1, ValuesSourceType.NUMERIC); assertReadFromStream(2, ValuesSourceType.BYTES); assertReadFromStream(3, ValuesSourceType.GEOPOINT); + assertReadFromStream(4, ValuesSourceType.RANGE); } @Override @@ -65,5 +68,6 @@ public void testWriteTo() throws IOException { 
diff --git a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
index 92bf4d6acad2c..197e82ea3a47b 100644
--- a/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
@@ -100,7 +100,7 @@ public void testSimpleProfile() {
         ProfileResult histoAggResult = aggProfileResultsList.get(0);
         assertThat(histoAggResult, notNullValue());
         assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
         assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
         assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0));
         assertThat(histoAggResult.getTime(), greaterThan(0L));
@@ -145,7 +145,7 @@ public void testMultiLevelProfile() {
         ProfileResult histoAggResult = aggProfileResultsList.get(0);
         assertThat(histoAggResult, notNullValue());
         assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
         assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
         assertThat(histoAggResult.getTime(), greaterThan(0L));
         Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
@@ -215,7 +215,7 @@ public void testMultiLevelProfileBreadthFirst() {
         ProfileResult histoAggResult = aggProfileResultsList.get(0);
         assertThat(histoAggResult, notNullValue());
         assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
         assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
         assertThat(histoAggResult.getTime(), greaterThan(0L));
         Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
@@ -346,7 +346,7 @@ public void testComplexProfile() {
         ProfileResult histoAggResult = aggProfileResultsList.get(0);
         assertThat(histoAggResult, notNullValue());
         assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
         assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
         assertThat(histoAggResult.getTime(), greaterThan(0L));
         Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
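These four profiler assertions change only the expected aggregator name: plain numeric histograms are now served by NumericHistogramAggregator, with range fields handled by a separate aggregator. A hedged sketch of the naming assumption behind the updated expectations (not verified against the profiler internals):

    // Assumption: the profile tree labels each aggregation node with the
    // aggregator's simple class name, so the class split shows up directly
    // in the profiled query name.
    String queryName = aggregator.getClass().getSimpleName(); // "NumericHistogramAggregator"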