
Commit 287019a

Merge branch 'master' into feature/searchable-snapshots
2 parents: 0940bcd + 7c559be

73 files changed: 1643 additions & 412 deletions


buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java

Lines changed: 28 additions & 23 deletions
@@ -807,6 +807,7 @@ public synchronized void stop(boolean tailLogs) {
         requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped.");
         // Test clusters are not reused, don't spend time on a graceful shutdown
         stopHandle(esProcess.toHandle(), true);
+        reaper.unregister(toString());
         if (tailLogs) {
             logFileContents("Standard output of node", esStdoutFile);
             logFileContents("Standard error of node", esStderrFile);
@@ -831,39 +832,43 @@ public void setNameCustomization(Function<String, String> nameCustomizer) {
     }
 
     private void stopHandle(ProcessHandle processHandle, boolean forcibly) {
-        // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
+        // No-op if the process has already exited by itself.
         if (processHandle.isAlive() == false) {
             LOGGER.info("Process was not running when we tried to terminate it.");
             return;
         }
 
-        // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
-        processHandle.children().forEach(each -> stopHandle(each, forcibly));
+        // Stop all children last - if the ML processes are killed before the ES JVM then
+        // they'll be recorded as having failed and won't restart when the cluster restarts.
+        // ES could actually be a child when there's some wrapper process like on Windows,
+        // and in that case the ML processes will be grandchildren of the wrapper.
+        List<ProcessHandle> children = processHandle.children().collect(Collectors.toList());
+        try {
+            logProcessInfo(
+                "Terminating elasticsearch process" + (forcibly ? " forcibly " : "gracefully") + ":",
+                processHandle.info()
+            );
 
-        logProcessInfo(
-            "Terminating elasticsearch process" + (forcibly ? " forcibly " : "gracefully") + ":",
-            processHandle.info()
-        );
+            if (forcibly) {
+                processHandle.destroyForcibly();
+            } else {
+                processHandle.destroy();
+                waitForProcessToExit(processHandle);
+                if (processHandle.isAlive() == false) {
+                    return;
+                }
+                LOGGER.info("process did not terminate after {} {}, stopping it forcefully",
+                    ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT);
+                processHandle.destroyForcibly();
+            }
 
-        if (forcibly) {
-            processHandle.destroyForcibly();
-        } else {
-            processHandle.destroy();
             waitForProcessToExit(processHandle);
-            if (processHandle.isAlive() == false) {
-                return;
+            if (processHandle.isAlive()) {
+                throw new TestClustersException("Was not able to terminate elasticsearch process for " + this);
             }
-            LOGGER.info("process did not terminate after {} {}, stopping it forcefully",
-                ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT);
-            processHandle.destroyForcibly();
-        }
-
-        waitForProcessToExit(processHandle);
-        if (processHandle.isAlive()) {
-            throw new TestClustersException("Was not able to terminate elasticsearch process for " + this);
+        } finally {
+            children.forEach(each -> stopHandle(each, forcibly));
         }
-
-        reaper.unregister(toString());
     }
 
     private void logProcessInfo(String prefix, ProcessHandle.Info info) {
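
The try/finally reordering above is the heart of this change: children are collected before the parent is killed and are terminated only afterwards. A minimal, self-contained sketch of the same pattern using only the JDK's ProcessHandle API; the class and method names here are hypothetical, and the real stopHandle() additionally waits up to ES_DESTROY_TIMEOUT before escalating:

import java.util.List;
import java.util.stream.Collectors;

class ProcessTreeStopper {

    static void destroyTree(ProcessHandle handle, boolean forcibly) {
        if (handle.isAlive() == false) {
            return; // mirrors the early return for already-exited processes
        }
        // Snapshot children before the parent dies; once it exits they may be
        // re-parented and no longer reachable through children().
        List<ProcessHandle> children = handle.children().collect(Collectors.toList());
        try {
            if (forcibly) {
                handle.destroyForcibly();
            } else {
                handle.destroy();
                // the real code waits with a timeout here, then escalates to
                // destroyForcibly() if the process is still alive
            }
        } finally {
            // Children last, so child processes (e.g. ML) outlive the parent JVM
            // and are not recorded as failed, per the commit comment above.
            children.forEach(child -> destroyTree(child, forcibly));
        }
    }

    public static void main(String[] args) {
        // Usage: forcibly stop every child of the current process
        // (a no-op when the current process has spawned nothing).
        ProcessHandle.current().children().forEach(c -> destroyTree(c, true));
    }
}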

docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc

Lines changed: 118 additions & 16 deletions
@@ -1,32 +1,134 @@
 [[analysis-limit-token-count-tokenfilter]]
-=== Limit Token Count Token Filter
+=== Limit token count token filter
+++++
+<titleabbrev>Limit token count</titleabbrev>
+++++
 
-Limits the number of tokens that are indexed per document and field.
+Limits the number of output tokens. The `limit` filter is commonly used to limit
+the size of document field values based on token count.
 
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting |Description
-|`max_token_count` |The maximum number of tokens that should be indexed
-per document and field. The default is `1`
+By default, the `limit` filter keeps only the first token in a stream. For
+example, the filter can change the token stream `[ one, two, three ]` to
+`[ one ]`.
 
-|`consume_all_tokens` |If set to `true` the filter exhaust the stream
-even if `max_token_count` tokens have been consumed already. The default
-is `false`.
-|=======================================================================
+This filter uses Lucene's
+https://lucene.apache.org/core/{lucene_version_path}/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html[LimitTokenCountFilter].
 
-Here is an example:
+[TIP]
+====
+If you want to limit the size of field values based on
+_character length_, use the <<ignore-above,`ignore_above`>> mapping parameter.
+====
+
+[[analysis-limit-token-count-tokenfilter-configure-parms]]
+==== Configurable parameters
+
+`max_token_count`::
+(Optional, integer)
+Maximum number of tokens to keep. Once this limit is reached, any remaining
+tokens are excluded from the output. Defaults to `1`.
+
+`consume_all_tokens`::
+(Optional, boolean)
+If `true`, the `limit` filter exhausts the token stream, even if the
+`max_token_count` has already been reached. Defaults to `false`.
+
+[[analysis-limit-token-count-tokenfilter-analyze-ex]]
+==== Example
+
+The following <<indices-analyze,analyze API>> request uses the `limit`
+filter to keep only the first two tokens in `quick fox jumps over lazy dog`:
+
+[source,console]
+--------------------------------------------------
+GET _analyze
+{
+  "tokenizer": "standard",
+  "filter": [
+    {
+      "type": "limit",
+      "max_token_count": 2
+    }
+  ],
+  "text": "quick fox jumps over lazy dog"
+}
+--------------------------------------------------
+
+The filter produces the following tokens:
+
+[source,text]
+--------------------------------------------------
+[ quick, fox ]
+--------------------------------------------------
+
+/////////////////////
+[source,console-result]
+--------------------------------------------------
+{
+  "tokens": [
+    {
+      "token": "quick",
+      "start_offset": 0,
+      "end_offset": 5,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "fox",
+      "start_offset": 6,
+      "end_offset": 9,
+      "type": "<ALPHANUM>",
+      "position": 1
+    }
+  ]
+}
+--------------------------------------------------
+/////////////////////
+
+[[analysis-limit-token-count-tokenfilter-analyzer-ex]]
+==== Add to an analyzer
+
+The following <<indices-create-index,create index API>> request uses the
+`limit` filter to configure a new
+<<analysis-custom-analyzer,custom analyzer>>.
 
 [source,console]
 --------------------------------------------------
-PUT /limit_example
+PUT limit_example
 {
   "settings": {
     "analysis": {
       "analyzer": {
-        "limit_example": {
-          "type": "custom",
+        "standard_one_token_limit": {
           "tokenizer": "standard",
-          "filter": ["lowercase", "five_token_limit"]
+          "filter": [ "limit" ]
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+
+[[analysis-limit-token-count-tokenfilter-customize]]
+==== Customize
+
+To customize the `limit` filter, duplicate it to create the basis
+for a new custom token filter. You can modify the filter using its configurable
+parameters.
+
+For example, the following request creates a custom `limit` filter that keeps
+only the first five tokens of a stream:
+
+[source,console]
+--------------------------------------------------
+PUT custom_limit_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "whitespace_five_token_limit": {
+          "tokenizer": "whitespace",
+          "filter": [ "five_token_limit" ]
         }
       },
       "filter": {

docs/reference/ccr/getting-started.asciidoc

Lines changed: 3 additions & 2 deletions
@@ -135,7 +135,8 @@ remote cluster.
     "num_nodes_connected" : 1, <2>
     "max_connections_per_cluster" : 3,
     "initial_connect_timeout" : "30s",
-    "skip_unavailable" : false
+    "skip_unavailable" : false,
+    "mode" : "sniff"
   }
 }
 --------------------------------------------------
@@ -146,7 +147,7 @@ remote cluster.
 alias `leader`
 <2> This shows the number of nodes in the remote cluster the local cluster is
 connected to.
-
+
 Alternatively, you can manage remote clusters on the
 *Management / Elasticsearch / Remote Clusters* page in {kib}:
 
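The new "mode" field documented above can be checked against a running cluster with the low-level Java REST client. A minimal sketch, assuming a local node on port 9200 (host and port are placeholders):

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

class RemoteInfoDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // GET _remote/info returns one entry per configured remote cluster,
            // now including "mode" alongside "skip_unavailable".
            Response response = client.performRequest(new Request("GET", "/_remote/info"));
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
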
Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
+<div class="legalnotice">
+<p>
+Welcome to the official documentation for Elasticsearch:
+the search and analytics engine that powers the Elastic Stack.
+If you want to learn how to use Elasticsearch to search and analyze your
+data, you've come to the right place. This guide shows you how to:
+</p>
+<div class="itemizedlist">
+<ul class="itemizedlist" type="disc">
+<li class="listitem">Install, configure, and administer an Elasticsearch
+cluster.</li>
+<li class="listitem">Index your data, optimize your indices, and search
+with the Elasticsearch query language.
+</li>
+<li class="listitem">Discover trends, patterns, and anomalies with
+aggregations and the machine learning APIs.
+</li>
+</ul>
+</div>
+</div>

docs/reference/mapping/types/geo-shape.asciidoc

Lines changed: 2 additions & 3 deletions
@@ -142,9 +142,8 @@ The following features are not yet supported with the new indexing approach:
 using a `bool` query with each individual point.
 
 * `CONTAINS` relation query - when using the new default vector indexing strategy, `geo_shape`
-queries with `relation` defined as `contains` are not yet supported. If this query relation
-is an absolute necessity, it is recommended to set `strategy` to `quadtree` and use the
-deprecated PrefixTree strategy indexing approach.
+queries with `relation` defined as `contains` are supported for indices created with
+ElasticSearch 7.5.0 or higher.
 
 [[prefix-trees]]
 [float]

docs/reference/mapping/types/shape.asciidoc

Lines changed: 3 additions & 3 deletions
@@ -74,8 +74,8 @@ The following features are not yet supported:
 over each individual point. For now, if this is absolutely needed, this can be achieved
 using a `bool` query with each individual point. (Note: this could be very costly)
 
-* `CONTAINS` relation query - `shape` queries with `relation` defined as `contains` are not
-yet supported.
+* `CONTAINS` relation query - `shape` queries with `relation` defined as `contains` are supported
+for indices created with ElasticSearch 7.5.0 or higher.
 
 [float]
 ===== Example
@@ -445,4 +445,4 @@ POST /example/_doc
 Due to the complex input structure and index representation of shapes,
 it is not currently possible to sort shapes or retrieve their fields
 directly. The `shape` value is only retrievable through the `_source`
-field.
\ No newline at end of file
+field.

docs/reference/query-dsl/geo-shape-query.asciidoc

Lines changed: 1 addition & 2 deletions
@@ -151,8 +151,7 @@ has nothing in common with the query geometry.
 * `WITHIN` - Return all documents whose `geo_shape` field
 is within the query geometry.
 * `CONTAINS` - Return all documents whose `geo_shape` field
-contains the query geometry. Note: this is only supported using the
-`recursive` Prefix Tree Strategy deprecated[6.6]
+contains the query geometry.
 
 [float]
 ==== Ignore Unmapped
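
With the prefix-tree caveat removed, `CONTAINS` works like any other relation. A hedged sketch of a `geo_shape` CONTAINS search via the low-level Java REST client; index `example` and field `location` are placeholders, not names from the docs above:

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

class GeoShapeContainsDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Match documents whose geo_shape field contains the given point.
            Request request = new Request("GET", "/example/_search");
            request.setJsonEntity(
                "{ \"query\": { \"geo_shape\": { \"location\": {"
                + " \"shape\": { \"type\": \"point\", \"coordinates\": [ 13.4, 52.5 ] },"
                + " \"relation\": \"contains\" } } } }");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}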

docs/reference/query-dsl/shape-query.asciidoc

Lines changed: 5 additions & 3 deletions
@@ -170,12 +170,14 @@ GET /example/_search
 
 The following is a complete list of spatial relation operators available:
 
-* `INTERSECTS` - (default) Return all documents whose `geo_shape` field
+* `INTERSECTS` - (default) Return all documents whose `shape` field
 intersects the query geometry.
-* `DISJOINT` - Return all documents whose `geo_shape` field
+* `DISJOINT` - Return all documents whose `shape` field
 has nothing in common with the query geometry.
-* `WITHIN` - Return all documents whose `geo_shape` field
+* `WITHIN` - Return all documents whose `shape` field
 is within the query geometry.
+* `CONTAINS` - Return all documents whose `shape` field
+contains the query geometry.
 
 [float]
 ==== Ignore Unmapped
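
The newly listed `CONTAINS` operator is used the same way for the cartesian `shape` field. A sketch mirroring the `geo_shape` example above; index `example` and field `geometry` are again placeholders:

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

class ShapeContainsDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Match documents whose shape field contains the given cartesian point.
            Request request = new Request("GET", "/example/_search");
            request.setJsonEntity(
                "{ \"query\": { \"shape\": { \"geometry\": {"
                + " \"shape\": { \"type\": \"point\", \"coordinates\": [ -25.0, 25.0 ] },"
                + " \"relation\": \"contains\" } } } }");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}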

modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ public void testConvertScalarToList() throws Exception {
     public void testAppendMetadataExceptVersion() throws Exception {
         // here any metadata field value becomes a list, which won't make sense in most of the cases,
         // but support for append is streamlined like for set so we test it
-        MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING);
+        MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.ID, MetaData.ROUTING);
         List<String> values = new ArrayList<>();
         Processor appendProcessor;
         if (randomBoolean()) {

modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ public void testSetExistingNullFieldWithOverrideDisabled() throws Exception {
     }
 
     public void testSetMetadataExceptVersion() throws Exception {
-        MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING);
+        MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.ID, MetaData.ROUTING);
         Processor processor = createSetProcessor(randomMetaData.getFieldName(), "_value", true);
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
         processor.execute(ingestDocument);
