Skip to content

Commit 835cd88

Browse files
authored
Add runtime_mappings to search request (backport of #64374) (#64889)
* Add `runtime_mappings` to search request (backport of #64374) This adds a way to specify the `runtime_mappings` on a search request which are always "runtime" fields. It looks like: ``` curl -XDELETE -uelastic:password -HContent-Type:application/json localhost:9200/test curl -XPOST -uelastic:password -HContent-Type:application/json 'localhost:9200/test/_bulk?pretty&refresh' -d' {"index": {}} {"animal": "cat", "sound": "meow"} {"index": {}} {"animal": "dog", "sound": "woof"} {"index": {}} {"animal": "snake", "sound": "hisssssssssssssssss"} ' curl -XPOST -uelastic:password -HContent-Type:application/json localhost:9200/test/_search?pretty -d' { "runtime_mappings": { "animal.upper": { "type": "keyword", "script": "for (String s : doc[\"animal.keyword\"]) {emit(s.toUpperCase())}" } }, "query": { "match": { "animal.upper": "DOG" } } }' ``` NOTE: If we have to send a search request with runtime mappings to a node that doesn't support runtime mappings at all then we'll fail the search request entirely. The alternative would be to not send those runtime mappings and let the node fail the search request with an "unknown field" error. I believe that would be surprising and hard to debug, because you defined the field in the search request itself. NOTE: It isn't obvious but you can also use `runtime_mappings` to override fields inside objects by naming the runtime fields with `.` in them. 
Like this: ``` curl -XDELETE -uelastic:password -HContent-Type:application/json localhost:9200/test curl -uelastic:password -XPOST -HContent-Type:application/json localhost:9200/test/_bulk?refresh -d' {"index":{}} {"name": {"first": "Andrew", "last": "Wiggin"}} {"index":{}} {"name": {"first": "Julian", "last": "Delphiki", "suffix": "II"}} ' curl -uelastic:password -XPOST -HContent-Type:application/json localhost:9200/test/_search?pretty -d'{ "runtime_mappings": { "name.first": { "type": "keyword", "script": "if (\"Wiggin\".equals(doc[\"name.last.keyword\"].value)) {emit(\"Ender\");} else if (\"Delphiki\".equals(doc[\"name.last.keyword\"].value)) {emit(\"Bean\");}" } }, "query": { "match": { "name.first": "Bean" } } }' ``` Relates to #59332
1 parent bdeb82d commit 835cd88

File tree

43 files changed

+1444
-464
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

43 files changed

+1444
-464
lines changed

modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/DisableGraphQueryTests.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
4646
import java.util.Collection;
4747
import java.util.Collections;
4848

49+
import static java.util.Collections.emptyMap;
4950
import static org.hamcrest.Matchers.equalTo;
5051

5152
/**
@@ -84,7 +85,7 @@ public void setup() {
8485
indexService = createIndex("test", settings, "t",
8586
"text_shingle", "type=text,analyzer=text_shingle",
8687
"text_shingle_unigram", "type=text,analyzer=text_shingle_unigram");
87-
shardContext = indexService.newQueryShardContext(0, null, () -> 0L, null);
88+
shardContext = indexService.newQueryShardContext(0, null, () -> 0L, null, emptyMap());
8889

8990
// parsed queries for "text_shingle_unigram:(foo bar baz)" with query parsers
9091
// that ignores position length attribute

modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,9 @@
2929
import org.apache.lucene.search.ScoreMode;
3030
import org.apache.lucene.search.Scorer;
3131
import org.apache.lucene.search.Weight;
32-
import org.elasticsearch.Version;
3332
import org.apache.lucene.store.ByteBuffersDirectory;
3433
import org.apache.lucene.store.Directory;
34+
import org.elasticsearch.Version;
3535
import org.elasticsearch.action.ActionRequestValidationException;
3636
import org.elasticsearch.action.ActionResponse;
3737
import org.elasticsearch.action.ActionType;
@@ -89,6 +89,7 @@
8989
import java.util.Objects;
9090

9191
import static java.util.Arrays.asList;
92+
import static java.util.Collections.emptyMap;
9293
import static java.util.Collections.unmodifiableList;
9394
import static org.elasticsearch.action.ValidateActions.addValidationError;
9495
import static org.elasticsearch.rest.RestRequest.Method.GET;
@@ -577,7 +578,7 @@ private static Response prepareRamIndex(Request request,
577578
searcher.setQueryCache(null);
578579
final long absoluteStartMillis = System.currentTimeMillis();
579580
QueryShardContext context =
580-
indexService.newQueryShardContext(0, searcher, () -> absoluteStartMillis, null);
581+
indexService.newQueryShardContext(0, searcher, () -> absoluteStartMillis, null, emptyMap());
581582
return handler.apply(context, indexReader.leaves().get(0));
582583
}
583584
}

modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java

+3-1
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@
3232
import java.util.List;
3333
import java.util.Map;
3434

35+
import static java.util.Collections.emptyMap;
36+
3537
/**
3638
* Test that needsScores() is reported correctly depending on whether _score is used
3739
*/
@@ -45,7 +47,7 @@ public void testNeedsScores() {
4547
contexts.put(NumberSortScript.CONTEXT, Whitelist.BASE_WHITELISTS);
4648
PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts);
4749

48-
QueryShardContext shardContext = index.newQueryShardContext(0, null, () -> 0, null);
50+
QueryShardContext shardContext = index.newQueryShardContext(0, null, () -> 0, null, emptyMap());
4951

5052
NumberSortScript.Factory factory = service.compile(null, "1.2", NumberSortScript.CONTEXT, Collections.emptyMap());
5153
NumberSortScript.LeafFactory ss = factory.newFactory(Collections.emptyMap(), shardContext.lookup());

modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@
103103
import java.util.function.Function;
104104
import java.util.stream.Collectors;
105105

106+
import static java.util.Collections.emptyMap;
106107
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
107108
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
108109
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
@@ -535,7 +536,7 @@ public void testQueryWithRewrite() throws Exception {
535536
QueryShardContext shardContext = indexService.newQueryShardContext(
536537
randomInt(20), null, () -> {
537538
throw new UnsupportedOperationException();
538-
}, null);
539+
}, null, emptyMap());
539540
PlainActionFuture<QueryBuilder> future = new PlainActionFuture<>();
540541
Rewriteable.rewriteAndFetch(queryBuilder, shardContext, future);
541542
assertQueryBuilder(qbSource, future.get());

modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@
5151
import java.util.Map;
5252
import java.util.function.Function;
5353

54+
import static java.util.Collections.emptyMap;
5455
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
5556
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
5657
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
@@ -258,7 +259,7 @@ public void testRangeQueriesWithNow() throws Exception {
258259
try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) {
259260
long[] currentTime = new long[] {System.currentTimeMillis()};
260261
QueryShardContext queryShardContext =
261-
indexService.newQueryShardContext(0, searcher, () -> currentTime[0], null);
262+
indexService.newQueryShardContext(0, searcher, () -> currentTime[0], null, emptyMap());
262263

263264
BytesReference source = BytesReference.bytes(jsonBuilder().startObject()
264265
.field("field1", "value")

modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelperTests.java

+1
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@ public void testSliceIntoSubRequests() throws IOException {
3838
() -> null,
3939
() -> null,
4040
() -> emptyList(),
41+
() -> null,
4142
() -> null));
4243
if (searchRequest.source() != null) {
4344
// Clear the slice builder if there is one set. We can't call sliceIntoSubRequests if it is.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
---
2+
"date_nanos requires dates after 1970 and before 2262":
3+
- skip:
4+
version: " - 6.99.99"
5+
reason: "Implemented in 7.0"
6+
7+
- do:
8+
indices.create:
9+
index: date_ns
10+
body:
11+
settings:
12+
number_of_shards: 3
13+
number_of_replicas: 0
14+
mappings:
15+
properties:
16+
date:
17+
type: date_nanos
18+
field:
19+
type: long
20+
21+
- do:
22+
bulk:
23+
refresh: true
24+
body:
25+
- '{ "index" : { "_index" : "date_ns", "_id" : "date_ns_1" } }'
26+
- '{"date" : "1969-10-28T12:12:12.123456789Z" }'
27+
- '{ "index" : { "_index" : "date_ns", "_id" : "date_ns_2" } }'
28+
- '{"date" : "2263-10-29T12:12:12.123456789Z" }'
29+
30+
- match: { errors: true }
31+
- match: { items.0.index.status: 400 }
32+
- match: { items.0.index.error.type: mapper_parsing_exception }
33+
- match: { items.0.index.error.caused_by.reason: "date[1969-10-28T12:12:12.123456789Z] is before the epoch in 1970 and cannot be stored in nanosecond resolution" }
34+
- match: { items.1.index.status: 400 }
35+
- match: { items.1.index.error.type: mapper_parsing_exception }
36+
- match: { items.1.index.error.caused_by.reason: "date[2263-10-29T12:12:12.123456789Z] is after 2262-04-11T23:47:16.854775807 and cannot be stored in nanosecond resolution" }

rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml

-22
Original file line numberDiff line numberDiff line change
@@ -73,28 +73,6 @@ setup:
7373
- match: { hits.hits.1._id: "second" }
7474
- match: { hits.hits.1.sort: [1540815132987654321] }
7575

76-
77-
---
78-
"date_nanos requires dates after 1970 and before 2262":
79-
80-
- do:
81-
bulk:
82-
refresh: true
83-
body:
84-
- '{ "index" : { "_index" : "date_ns", "_id" : "date_ns_1" } }'
85-
- '{"date" : "1969-10-28T12:12:12.123456789Z" }'
86-
- '{ "index" : { "_index" : "date_ns", "_id" : "date_ns_2" } }'
87-
- '{"date" : "2263-10-29T12:12:12.123456789Z" }'
88-
89-
- match: { errors: true }
90-
- match: { items.0.index.status: 400 }
91-
- match: { items.0.index.error.type: mapper_parsing_exception }
92-
- match: { items.0.index.error.caused_by.reason: "date[1969-10-28T12:12:12.123456789Z] is before the epoch in 1970 and cannot be stored in nanosecond resolution" }
93-
- match: { items.1.index.status: 400 }
94-
- match: { items.1.index.error.type: mapper_parsing_exception }
95-
- match: { items.1.index.error.caused_by.reason: "date[2263-10-29T12:12:12.123456789Z] is after 2262-04-11T23:47:16.854775807 and cannot be stored in nanosecond resolution" }
96-
97-
9876
---
9977
"doc value fields are working as expected across date and date_nanos fields":
10078

server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@
5555
import java.util.function.Function;
5656
import java.util.stream.Collectors;
5757

58+
import static java.util.Collections.emptyMap;
5859
import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates;
5960
import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates;
6061
import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV2Template;
@@ -181,7 +182,7 @@ public static Template resolveTemplate(final String matchingTemplate, final Stri
181182
resolvedAliases, tempClusterState.metadata(), aliasValidator, xContentRegistry,
182183
// the context is only used for validation so it's fine to pass fake values for the
183184
// shard id and the current timestamp
184-
tempIndexService.newQueryShardContext(0, null, () -> 0L, null)));
185+
tempIndexService.newQueryShardContext(0, null, () -> 0L, null, emptyMap())));
185186
Map<String, AliasMetadata> aliasesByName = aliases.stream().collect(
186187
Collectors.toMap(AliasMetadata::getAlias, Function.identity()));
187188

server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,8 @@ public void run() {
8989
CollapseBuilder innerCollapseBuilder = innerHitBuilder.getInnerCollapseBuilder();
9090
SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder)
9191
.query(groupQuery)
92-
.postFilter(searchRequest.source().postFilter());
92+
.postFilter(searchRequest.source().postFilter())
93+
.runtimeMappings(searchRequest.source().runtimeMappings());
9394
SearchRequest groupRequest = new SearchRequest(searchRequest);
9495
groupRequest.source(sourceBuilder);
9596
multiRequest.add(groupRequest);

server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java

+5-3
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
package org.elasticsearch.cluster.metadata;
2121

2222
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
23+
2324
import org.apache.logging.log4j.Level;
2425
import org.apache.logging.log4j.LogManager;
2526
import org.apache.logging.log4j.Logger;
@@ -100,6 +101,7 @@
100101
import java.util.stream.Collectors;
101102
import java.util.stream.IntStream;
102103

104+
import static java.util.Collections.emptyMap;
103105
import static java.util.Collections.singletonMap;
104106
import static java.util.stream.Collectors.toList;
105107
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
@@ -494,7 +496,7 @@ private ClusterState applyCreateIndexRequestWithV1Templates(final ClusterState c
494496
MetadataIndexTemplateService.resolveAliases(templates), currentState.metadata(), aliasValidator,
495497
// the context is only used for validation so it's fine to pass fake values for the
496498
// shard id and the current timestamp
497-
xContentRegistry, indexService.newQueryShardContext(0, null, () -> 0L, null)),
499+
xContentRegistry, indexService.newQueryShardContext(0, null, () -> 0L, null, emptyMap())),
498500
templates.stream().map(IndexTemplateMetadata::getName).collect(toList()), metadataTransformer);
499501
}
500502

@@ -527,7 +529,7 @@ private ClusterState applyCreateIndexRequestWithV2Template(final ClusterState cu
527529
MetadataIndexTemplateService.resolveAliases(currentState.metadata(), templateName), currentState.metadata(), aliasValidator,
528530
// the context is only used for validation so it's fine to pass fake values for the
529531
// shard id and the current timestamp
530-
xContentRegistry, indexService.newQueryShardContext(0, null, () -> 0L, null)),
532+
xContentRegistry, indexService.newQueryShardContext(0, null, () -> 0L, null, emptyMap())),
531533
Collections.singletonList(templateName), metadataTransformer);
532534
}
533535

@@ -578,7 +580,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata(final ClusterSt
578580
currentState.metadata(), aliasValidator, xContentRegistry,
579581
// the context is only used for validation so it's fine to pass fake values for the
580582
// shard id and the current timestamp
581-
indexService.newQueryShardContext(0, null, () -> 0L, null)),
583+
indexService.newQueryShardContext(0, null, () -> 0L, null, emptyMap())),
582584
org.elasticsearch.common.collect.List.of(), metadataTransformer);
583585
}
584586

server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
import java.util.function.Function;
4848

4949
import static java.util.Collections.emptyList;
50+
import static java.util.Collections.emptyMap;
5051
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
5152

5253
/**
@@ -149,7 +150,7 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable<AliasA
149150
// the context is only used for validation so it's fine to pass fake values for the shard id,
150151
// but the current timestamp should be set to real value as we may use `now` in a filtered alias
151152
aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext(0, null,
152-
() -> System.currentTimeMillis(), null), xContentRegistry);
153+
() -> System.currentTimeMillis(), null, emptyMap()), xContentRegistry);
153154
}
154155
};
155156
if (action.apply(newAliasValidator, metadata, index)) {

server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@
8080
import java.util.function.Predicate;
8181
import java.util.stream.Collectors;
8282

83+
import static java.util.Collections.emptyMap;
8384
import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping;
8485
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
8586

@@ -1092,7 +1093,7 @@ private static void validateCompositeTemplate(final ClusterState state,
10921093
new AliasValidator(),
10931094
// the context is only used for validation so it's fine to pass fake values for the
10941095
// shard id and the current timestamp
1095-
xContentRegistry, tempIndexService.newQueryShardContext(0, null, () -> 0L, null));
1096+
xContentRegistry, tempIndexService.newQueryShardContext(0, null, () -> 0L, null, emptyMap()));
10961097

10971098
// triggers inclusion of _timestamp field and its validation:
10981099
String indexName = DataStream.BACKING_INDEX_PREFIX + temporaryIndexName;

server/src/main/java/org/elasticsearch/index/IndexService.java

+9-3
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ public IndexService(
193193
assert indexAnalyzers != null;
194194
this.mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry,
195195
// we parse all percolator queries as they would be parsed on shard 0
196-
() -> newQueryShardContext(0, null, System::currentTimeMillis, null), idFieldDataEnabled, scriptService);
196+
() -> newQueryShardContext(0, null, System::currentTimeMillis, null, emptyMap()), idFieldDataEnabled, scriptService);
197197
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService);
198198
if (indexSettings.getIndexSortConfig().hasIndexSort()) {
199199
// we delay the actual creation of the sort order for this index because the mapping has not been merged yet.
@@ -588,13 +588,19 @@ public IndexSettings getIndexSettings() {
588588
* Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make
589589
* {@link IndexReader}-specific optimizations, such as rewriting containing range queries.
590590
*/
591-
public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searcher, LongSupplier nowInMillis, String clusterAlias) {
591+
public QueryShardContext newQueryShardContext(
592+
int shardId,
593+
IndexSearcher searcher,
594+
LongSupplier nowInMillis,
595+
String clusterAlias,
596+
Map<String, Object> runtimeMappings
597+
) {
592598
final SearchIndexNameMatcher indexNameMatcher =
593599
new SearchIndexNameMatcher(index().getName(), clusterAlias, clusterService, expressionResolver);
594600
return new QueryShardContext(
595601
shardId, indexSettings, bigArrays, indexCache.bitsetFilterCache(), indexFieldData::getForField, mapperService(),
596602
similarityService(), scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis, clusterAlias,
597-
indexNameMatcher, allowExpensiveQueries, valuesSourceRegistry);
603+
indexNameMatcher, allowExpensiveQueries, valuesSourceRegistry, runtimeMappings);
598604
}
599605

600606
/**

0 commit comments

Comments
 (0)