Skip to content

Commit f6ecbf2

Browse files
committed
Merge remote-tracking branch 'elastic/master' into retention-lease-unfollow
* elastic/master: (37 commits) Enable test logging for TransformIntegrationTests#testSearchTransform. stronger wording for ilm+rollover in docs (elastic#39159) Mute SingleNodeTests (elastic#39156) AwaitsFix XPackUsageIT#testXPackCcrUsage. Resolve concurrency with watcher trigger service (elastic#39092) Fix median calculation in MedianAbsoluteDeviationAggregatorTests (elastic#38979) [DOCS] Edits the remote clusters documentation (elastic#38996) add version 6.6.2 Revert "Mute failing test 20_mix_typless_typefull (elastic#38781)" (elastic#38912) Rebuild remote connections on profile changes (elastic#37678) Document 'max_size' parameter as shard size for rollover (elastic#38750) Add some missing toString() implementations (elastic#39124) Migrate Streamable to Writeable for cluster block package (elastic#37391) fix RethrottleTests retry (elastic#38978) Disable date parsing test in non english locale (elastic#39052) Remove BCryptTests (elastic#39098) [ML] Stop the ML memory tracker before closing node (elastic#39111) Allow retention lease operations under blocks (elastic#39089) ML refactor DatafeedsConfig(Update) so defaults are not populated in queries or aggs (elastic#38822) Fix retention leases sync on recovery test ...
2 parents fd795a2 + 5eef4ad commit f6ecbf2

File tree

89 files changed

+1335
-688
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

89 files changed

+1335
-688
lines changed

buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@ public void setDistribution(Distribution distribution) {
129129
public void freeze() {
130130
requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
131131
requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
132+
requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`");
132133
logger.info("Locking configuration of `{}`", this);
133134
configurationFrozen.set(true);
134135
}
@@ -204,16 +205,7 @@ private void startElasticsearchProcess(Path distroArtifact) {
204205
Map<String, String> environment = processBuilder.environment();
205206
// Don't inherit anything from the environment as that would lack reproducibility
206207
environment.clear();
207-
if (javaHome != null) {
208-
environment.put("JAVA_HOME", getJavaHome().getAbsolutePath());
209-
} else if (System.getenv().get("JAVA_HOME") != null) {
210-
logger.warn("{}: No java home configured will use it from environment: {}",
211-
this, System.getenv().get("JAVA_HOME")
212-
);
213-
environment.put("JAVA_HOME", System.getenv().get("JAVA_HOME"));
214-
} else {
215-
logger.warn("{}: No javaHome configured, will rely on default java detection", this);
216-
}
208+
environment.put("JAVA_HOME", getJavaHome().getAbsolutePath());
217209
environment.put("ES_PATH_CONF", configFile.getParent().toAbsolutePath().toString());
218210
environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m");
219211
// don't buffer all in memory, make sure we don't block on the default pipes

client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1261,7 +1261,8 @@ public void testGetAlias() throws IOException {
12611261
GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias,
12621262
highLevelClient().indices()::getAliasAsync);
12631263

1264-
assertThat(getAliasesResponse.getAliases().size(), equalTo(3));
1264+
assertThat("Unexpected number of aliases, got: " + getAliasesResponse.getAliases().toString(),
1265+
getAliasesResponse.getAliases().size(), equalTo(3));
12651266
assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1));
12661267
AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next();
12671268
assertThat(aliasMetaData1, notNullValue());

distribution/docker/src/docker/Dockerfile

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,6 @@ RUN curl --retry 8 -s ${jdkUrl} | tar -C /opt -zxf -
2323
# REF: https://github.com/elastic/elasticsearch-docker/issues/171
2424
RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /opt/jdk-${jdkVersion}/lib/security/cacerts
2525

26-
RUN yum install -y unzip which
27-
2826
RUN groupadd -g 1000 elasticsearch && \
2927
adduser -u 1000 -g 1000 -d /usr/share/elasticsearch elasticsearch
3028

@@ -51,7 +49,7 @@ ENV JAVA_HOME /opt/jdk-${jdkVersion}
5149
COPY --from=builder /opt/jdk-${jdkVersion} /opt/jdk-${jdkVersion}
5250

5351
RUN yum update -y && \
54-
yum install -y nc unzip wget which && \
52+
yum install -y nc && \
5553
yum clean all
5654

5755
RUN groupadd -g 1000 elasticsearch && \

docs/plugins/repository-s3.asciidoc

Lines changed: 133 additions & 85 deletions
Large diffs are not rendered by default.

docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,6 @@
11
[[analysis-synonym-graph-tokenfilter]]
22
=== Synonym Graph Token Filter
33

4-
beta[]
5-
64
The `synonym_graph` token filter allows to easily handle synonyms,
75
including multi-word synonyms correctly during the analysis process.
86

@@ -187,3 +185,8 @@ multiple versions of a token may choose which version of the token to emit when
187185
parsing synonyms, e.g. `asciifolding` will only produce the folded version of the
188186
token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an
189187
error.
188+
189+
WARNING: The synonym rules should not contain words that are removed by
190+
a filter that appears later in the chain (a `stop` filter, for instance).
191+
Removing a term from a synonym rule breaks the matching at query time.
192+

docs/reference/ilm/policy-definitions.asciidoc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -390,7 +390,7 @@ existing index meets one of the rollover conditions.
390390
[options="header"]
391391
|======
392392
| Name | Required | Default | Description
393-
| `max_size` | no | - | max index storage size.
393+
| `max_size` | no | - | max primary shard index storage size.
394394
See <<byte-units, Byte Units>>
395395
for formatting
396396
| `max_docs` | no | - | max number of documents an

docs/reference/ilm/set-up-lifecycle-policy.asciidoc

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,8 @@ PUT test-index
107107
-----------------------
108108
// CONSOLE
109109

110-
IMPORTANT: Its recommended not to use the create index API with a policy that
110+
IMPORTANT: Do not use the create index API with a policy that
111111
defines a rollover action. If you do so, the new index as the result of the
112-
rollover will not carry forward the policy. Always use index templates to
113-
define policies with rollover actions.
112+
rollover will not carry forward the policy. Always use
113+
<<applying-policy-to-template, index templates>> to define policies with rollover
114+
actions.

docs/reference/ilm/using-policies-rollover.asciidoc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,8 @@ The rollover action takes the following parameters:
3131
.`rollover` Action Parameters
3232
|===
3333
|Name |Description
34-
|max_size |The maximum estimated size the index is allowed to grow
35-
to. Defaults to `null`. Optional.
34+
|max_size |The maximum estimated size the primary shard of the index is allowed
35+
to grow to. Defaults to `null`. Optional.
3636
|max_docs |The maximum number of documents the index should
3737
contain. Defaults to `null`. Optional.
3838
|max_age |The maximum age of the index. Defaults to `null`. Optional.

docs/reference/indices/rollover-index.asciidoc

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,15 @@ from the original (rolled-over) index.
1818
In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index
1919
will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`.
2020

21+
The available conditions are:
22+
23+
.`conditions` parameters
24+
|===
25+
| Name | Description
26+
| max_age | The maximum age of the index
27+
| max_docs | The maximum number of documents the index should contain. This does not add documents multiple times for replicas
28+
| max_size | The maximum estimated size of the primary shard of the index
29+
|===
2130

2231
[source,js]
2332
--------------------------------------------------

docs/reference/modules/remote-clusters.asciidoc

Lines changed: 63 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -2,64 +2,64 @@
22
== Remote clusters
33

44
ifndef::include-xpack[]
5-
The _remote clusters_ module allows establishing uni-directional connections to
6-
a remote cluster. This functionality is used in
5+
The _remote clusters_ module enables you to establish uni-directional
6+
connections to a remote cluster. This functionality is used in
77
<<modules-cross-cluster-search,cross-cluster search>>.
88
endif::[]
99
ifdef::include-xpack[]
10-
The _remote clusters_ module allows establishing uni-directional connections to
11-
a remote cluster. This functionality is used in cross-cluster replication, and
10+
The _remote clusters_ module enables you to establish uni-directional
11+
connections to a remote cluster. This functionality is used in
12+
{stack-ov}/xpack-ccr.html[cross-cluster replication] and
1213
<<modules-cross-cluster-search,cross-cluster search>>.
1314
endif::[]
1415

1516
Remote cluster connections work by configuring a remote cluster and connecting
1617
only to a limited number of nodes in the remote cluster. Each remote cluster is
17-
referenced by a name and a list of seed nodes. When a remote cluster is
18+
referenced by a name and a list of seed nodes. When a remote cluster is
1819
registered, its cluster state is retrieved from one of the seed nodes so that by
1920
default up to three _gateway nodes_ are selected to be connected to as part of
2021
remote cluster requests. Remote cluster connections consist of uni-directional
2122
connections from the coordinating node to the previously selected remote nodes
22-
only. It is possible to tag which nodes should be selected through node
23-
attributes (see <<remote-cluster-settings>>).
23+
only. You can tag which nodes should be selected by using node attributes (see <<remote-cluster-settings>>).
2424

2525
Each node in a cluster that has remote clusters configured connects to one or
2626
more _gateway nodes_ and uses them to federate requests to the remote cluster.
2727

2828
[float]
2929
[[configuring-remote-clusters]]
30-
=== Configuring Remote Clusters
30+
=== Configuring remote clusters
3131

32-
Remote clusters can be specified globally using
33-
<<cluster-update-settings,cluster settings>> (which can be updated dynamically),
34-
or local to individual nodes using the `elasticsearch.yml` file.
32+
You can configure remote clusters globally by using
33+
<<cluster-update-settings,cluster settings>>, which you can update dynamically.
34+
Alternatively, you can configure them locally on individual nodes by using the `elasticsearch.yml` file.
3535

36-
If a remote cluster is configured via `elasticsearch.yml` only the nodes with
37-
that configuration will be able to connect to the remote cluster. In other
38-
words, functionality that relies on remote cluster requests will have to be
39-
driven specifically from those nodes. Remote clusters set via the
40-
<<cluster-update-settings,cluster settings API>> will be available on every node
41-
in the cluster.
42-
43-
The `elasticsearch.yml` config file for a node that connects to remote clusters
44-
needs to list the remote clusters that should be connected to, for instance:
36+
If you specify the settings in `elasticsearch.yml` files, only the nodes with
37+
those settings can connect to the remote cluster. In other words, functionality
38+
that relies on remote cluster requests must be driven specifically from those
39+
nodes. For example:
4540

4641
[source,yaml]
4742
--------------------------------
4843
cluster:
4944
remote:
5045
cluster_one: <1>
5146
seeds: 127.0.0.1:9300
52-
cluster_two: <1>
47+
transport.ping_schedule: 30s <2>
48+
cluster_two:
5349
seeds: 127.0.0.1:9301
50+
transport.compress: true <3>
5451
5552
--------------------------------
5653
<1> `cluster_one` and `cluster_two` are arbitrary _cluster aliases_ representing
5754
the connection to each cluster. These names are subsequently used to distinguish
5855
between local and remote indices.
56+
<2> A keep-alive ping is configured for `cluster_one`.
57+
<3> Compression is explicitly enabled for requests to `cluster_two`.
58+
59+
For more information about the optional transport settings, see
60+
<<modules-transport>>.
5961

60-
The equivalent example using the <<cluster-update-settings,cluster settings
61-
API>> to add remote clusters to all nodes in the cluster would look like the
62-
following:
62+
If you use <<cluster-update-settings,cluster settings>>, the remote clusters are available on every node in the cluster. For example:
6363

6464
[source,js]
6565
--------------------------------
@@ -71,12 +71,14 @@ PUT _cluster/settings
7171
"cluster_one": {
7272
"seeds": [
7373
"127.0.0.1:9300"
74-
]
74+
],
75+
"transport.ping_schedule": "30s"
7576
},
7677
"cluster_two": {
7778
"seeds": [
7879
"127.0.0.1:9301"
79-
]
80+
],
81+
"transport.compress": true
8082
},
8183
"cluster_three": {
8284
"seeds": [
@@ -92,6 +94,40 @@ PUT _cluster/settings
9294
// TEST[setup:host]
9395
// TEST[s/127.0.0.1:9300/\${transport_host}/]
9496

97+
You can dynamically update the compression and ping schedule settings. However,
98+
you must re-include seeds in the settings update request. For example:
99+
100+
[source,js]
101+
--------------------------------
102+
PUT _cluster/settings
103+
{
104+
"persistent": {
105+
"cluster": {
106+
"remote": {
107+
"cluster_one": {
108+
"seeds": [
109+
"127.0.0.1:9300"
110+
],
111+
"transport.ping_schedule": "60s"
112+
},
113+
"cluster_two": {
114+
"seeds": [
115+
"127.0.0.1:9301"
116+
],
117+
"transport.compress": false
118+
}
119+
}
120+
}
121+
}
122+
}
123+
--------------------------------
124+
// CONSOLE
125+
// TEST[continued]
126+
127+
NOTE: When the compression or ping schedule settings change, all the existing
128+
node connections must close and re-open, which can cause in-flight requests to
129+
fail.
130+
95131
A remote cluster can be deleted from the cluster settings by setting its seeds
96132
to `null`:
97133

@@ -173,6 +209,6 @@ PUT _cluster/settings
173209
[[retrieve-remote-clusters-info]]
174210
=== Retrieving remote clusters info
175211

176-
The <<cluster-remote-info, Remote Cluster Info API>> allows to retrieve
212+
You can use the <<cluster-remote-info, remote cluster info API>> to retrieve
177213
information about the configured remote clusters, as well as the remote nodes
178214
that the node is connected to.

libs/ssl-config/build.gradle

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,23 @@ dependencies {
3434
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
3535
}
3636

37+
if (isEclipse) {
38+
// in eclipse the project is under a fake root, we need to change around the source sets
39+
sourceSets {
40+
if (project.path == ":libs:ssl-config") {
41+
main.java.srcDirs = ['java']
42+
main.resources.srcDirs = ['resources']
43+
} else {
44+
test.java.srcDirs = ['java']
45+
test.resources.srcDirs = ['resources']
46+
}
47+
}
48+
}
49+
3750
forbiddenApisMain {
3851
replaceSignatureFiles 'jdk-signatures'
3952
}
53+
4054
forbiddenPatterns {
4155
exclude '**/*.key'
4256
exclude '**/*.pem'
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
// this is just shell gradle file for eclipse to have separate projects for geo src and tests
1+
// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests
22
apply from: '../../build.gradle'
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
// this is just shell gradle file for eclipse to have separate projects for geo src and tests
1+
// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests
22
apply from: '../../build.gradle'
33
dependencies {
44
testCompile project(':libs:elasticsearch-ssl-config')
5-
}
5+
}

modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,6 @@ public void testInvalidJavaPattern() {
126126
}
127127

128128
public void testJavaPatternLocale() {
129-
// @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31724")
130129
assumeFalse("Can't run in a FIPS JVM, Joda parse date error", inFipsJvm());
131130
DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10),
132131
templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ITALIAN),
@@ -138,6 +137,18 @@ public void testJavaPatternLocale() {
138137
assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T00:00:00.000+02:00"));
139138
}
140139

140+
public void testJavaPatternEnglishLocale() {
141+
// Since testJavaPatternLocale is muted in FIPS mode, test that we can correctly parse dates in English
142+
DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10),
143+
templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH),
144+
"date_as_string", Collections.singletonList("yyyy dd MMMM"), "date_as_date");
145+
Map<String, Object> document = new HashMap<>();
146+
document.put("date_as_string", "2010 12 June");
147+
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
148+
dateProcessor.execute(ingestDocument);
149+
assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T00:00:00.000+02:00"));
150+
}
151+
141152
public void testJavaPatternDefaultYear() {
142153
String format = randomFrom("dd/MM", "8dd/MM");
143154
DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10),

modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
package org.elasticsearch.index.reindex;
2121

2222
import org.elasticsearch.ElasticsearchException;
23+
import org.elasticsearch.ExceptionsHelper;
2324
import org.elasticsearch.action.ActionFuture;
2425
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
2526
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
@@ -37,6 +38,7 @@
3738
import static org.hamcrest.Matchers.allOf;
3839
import static org.hamcrest.Matchers.both;
3940
import static org.hamcrest.Matchers.empty;
41+
import static org.hamcrest.Matchers.equalTo;
4042
import static org.hamcrest.Matchers.greaterThan;
4143
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
4244
import static org.hamcrest.Matchers.hasSize;
@@ -191,13 +193,15 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newReque
191193
assertThat(rethrottleResponse.getTasks(), hasSize(1));
192194
response.set(rethrottleResponse);
193195
} catch (ElasticsearchException e) {
194-
if (e.getCause() instanceof IllegalArgumentException) {
195-
// We want to retry in this case so we throw an assertion error
196-
logger.info("caught unprepared task, retrying until prepared");
197-
throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e);
198-
} else {
196+
Throwable unwrapped = ExceptionsHelper.unwrap(e, IllegalArgumentException.class);
197+
if (unwrapped == null) {
199198
throw e;
200199
}
200+
// We want to retry in this case so we throw an assertion error
201+
assertThat(unwrapped.getMessage(), equalTo("task [" + taskToRethrottle.getId()
202+
+ "] has not yet been initialized to the point where it knows how to rethrottle itself"));
203+
logger.info("caught unprepared task, retrying until prepared");
204+
throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e);
201205
}
202206
});
203207

rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -104,10 +104,8 @@
104104
"Implicitly create a typeless index while there is a typed template":
105105

106106
- skip:
107-
#version: " - 6.99.99"
108-
#reason: needs typeless index operations to work on typed indices
109-
version: "all"
110-
reason: "muted, waiting for #38711"
107+
version: " - 6.99.99"
108+
reason: needs typeless index operations to work on typed indices
111109

112110
- do:
113111
indices.put_template:

0 commit comments

Comments
 (0)