Skip to content

Commit 9da3e73

Browse files
committed
Merge remote-tracking branch 'es/master' into ccr
* es/master: Add remote cluster client (#29495) Ensure flush happens on shard idle Adds SpanGapQueryBuilder in the query DSL (#28636) Control max size and count of warning headers (#28427) Make index APIs work without types. (#29479) Deprecate filtering on `_type`. (#29468) Fix auto-generated ID example format (#29461) Fix typo in max number of threads check docs (#29469) Add primary term to translog header (#29227) Add a helper method to get a random java.util.TimeZone (#29487) Move TimeValue into elasticsearch-core project (#29486) Fix NPE in InternalGeoCentroidTests#testReduceRandom (#29481) Build: introduce keystoreFile for cluster config (#29491) test: Index more docs, so that it is less likely the search request does not time out.
2 parents 0dd61fc + 694e2a9 commit 9da3e73

File tree

98 files changed

+1722
-678
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

98 files changed

+1722
-678
lines changed

buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy

+11
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,8 @@ class ClusterConfiguration {
141141

142142
Map<String, String> keystoreSettings = new HashMap<>()
143143

144+
Map<String, Object> keystoreFiles = new HashMap<>()
145+
144146
// map from destination path, to source file
145147
Map<String, Object> extraConfigFiles = new HashMap<>()
146148

@@ -167,6 +169,15 @@ class ClusterConfiguration {
167169
keystoreSettings.put(name, value)
168170
}
169171

172+
/**
173+
* Adds a file to the keystore. The name is the secure setting name, and the sourceFile
174+
* is anything accepted by project.file()
175+
*/
176+
@Input
177+
void keystoreFile(String name, Object sourceFile) {
178+
keystoreFiles.put(name, sourceFile)
179+
}
180+
170181
@Input
171182
void plugin(String path) {
172183
Project pluginProject = project.project(path)

buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy

+33-1
Original file line numberDiff line numberDiff line change
@@ -180,6 +180,7 @@ class ClusterFormationTasks {
180180
setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
181181
setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
182182
setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
183+
setup = configureAddKeystoreFileTasks(prefix, project, setup, node)
183184

184185
if (node.config.plugins.isEmpty() == false) {
185186
if (node.nodeVersion == VersionProperties.elasticsearch) {
@@ -323,7 +324,7 @@ class ClusterFormationTasks {
323324

324325
/** Adds a task to create keystore */
325326
static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) {
326-
if (node.config.keystoreSettings.isEmpty()) {
327+
if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) {
327328
return setup
328329
} else {
329330
/*
@@ -357,6 +358,37 @@ class ClusterFormationTasks {
357358
return parentTask
358359
}
359360

361+
/** Adds tasks to add files to the keystore */
362+
static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) {
363+
Map<String, Object> kvs = node.config.keystoreFiles
364+
if (kvs.isEmpty()) {
365+
return setup
366+
}
367+
Task parentTask = setup
368+
/*
369+
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting
370+
* the short name requiring the path to already exist.
371+
*/
372+
final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
373+
for (Map.Entry<String, Object> entry in kvs) {
374+
String key = entry.getKey()
375+
String name = taskName(parent, node, 'addToKeystore#' + key)
376+
String srcFileName = entry.getValue()
377+
Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName)
378+
t.doFirst {
379+
File srcFile = project.file(srcFileName)
380+
if (srcFile.isDirectory()) {
381+
throw new GradleException("Source for keystoreFile must be a file: ${srcFile}")
382+
}
383+
if (srcFile.exists() == false) {
384+
throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}")
385+
}
386+
}
387+
parentTask = t
388+
}
389+
return parentTask
390+
}
391+
360392
static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
361393
if (node.config.extraConfigFiles.isEmpty()) {
362394
return setup

docs/painless/painless-getting-started.asciidoc

+6-6
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,7 @@ their last name:
239239

240240
[source,js]
241241
----------------------------------------------------------------
242-
POST hockey/player/_update_by_query
242+
POST hockey/_update_by_query
243243
{
244244
"script": {
245245
"lang": "painless",
@@ -260,7 +260,7 @@ names start with a consonant and end with a vowel:
260260

261261
[source,js]
262262
----------------------------------------------------------------
263-
POST hockey/player/_update_by_query
263+
POST hockey/_update_by_query
264264
{
265265
"script": {
266266
"lang": "painless",
@@ -281,7 +281,7 @@ remove all of the vowels in all of their last names:
281281

282282
[source,js]
283283
----------------------------------------------------------------
284-
POST hockey/player/_update_by_query
284+
POST hockey/_update_by_query
285285
{
286286
"script": {
287287
"lang": "painless",
@@ -297,7 +297,7 @@ method so it supports `$1` and `\1` for replacements:
297297

298298
[source,js]
299299
----------------------------------------------------------------
300-
POST hockey/player/_update_by_query
300+
POST hockey/_update_by_query
301301
{
302302
"script": {
303303
"lang": "painless",
@@ -319,7 +319,7 @@ This will make all of the vowels in the hockey player's last names upper case:
319319

320320
[source,js]
321321
----------------------------------------------------------------
322-
POST hockey/player/_update_by_query
322+
POST hockey/_update_by_query
323323
{
324324
"script": {
325325
"lang": "painless",
@@ -337,7 +337,7 @@ last names upper case:
337337

338338
[source,js]
339339
----------------------------------------------------------------
340-
POST hockey/player/_update_by_query
340+
POST hockey/_update_by_query
341341
{
342342
"script": {
343343
"lang": "painless",

docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc

+5-5
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ Example:
3838

3939
[source,js]
4040
--------------------------------------------------
41-
GET news/article/_search
41+
GET news/_search
4242
{
4343
"query" : {
4444
"match" : {"content" : "Bird flu"}
@@ -153,7 +153,7 @@ We can drill down into examples of these documents to see why pozmantier is conn
153153

154154
[source,js]
155155
--------------------------------------------------
156-
GET news/article/_search
156+
GET news/_search
157157
{
158158
"query": {
159159
"simple_query_string": {
@@ -221,7 +221,7 @@ with the `filter_duplicate_text` setting turned on:
221221

222222
[source,js]
223223
--------------------------------------------------
224-
GET news/article/_search
224+
GET news/_search
225225
{
226226
"query": {
227227
"match": {
@@ -424,7 +424,7 @@ context:
424424

425425
[source,js]
426426
--------------------------------------------------
427-
GET news/article/_search
427+
GET news/_search
428428
{
429429
"query" : {
430430
"match" : {
@@ -463,7 +463,7 @@ will be analyzed using the `source_fields` parameter:
463463

464464
[source,js]
465465
--------------------------------------------------
466-
GET news/article/_search
466+
GET news/_search
467467
{
468468
"query" : {
469469
"match" : {

docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc

+1-1
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ had a value.
217217

218218
[source,js]
219219
--------------------------------------------------
220-
GET latency/data/_search
220+
GET latency/_search
221221
{
222222
"size": 0,
223223
"aggs" : {

docs/reference/docs/delete-by-query.asciidoc

+4-4
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ Back to the API format, this will delete tweets from the `twitter` index:
7575

7676
[source,js]
7777
--------------------------------------------------
78-
POST twitter/_doc/_delete_by_query?conflicts=proceed
78+
POST twitter/_delete_by_query?conflicts=proceed
7979
{
8080
"query": {
8181
"match_all": {}
@@ -85,12 +85,12 @@ POST twitter/_doc/_delete_by_query?conflicts=proceed
8585
// CONSOLE
8686
// TEST[setup:twitter]
8787

88-
It's also possible to delete documents of multiple indexes and multiple
89-
types at once, just like the search API:
88+
It's also possible to delete documents of multiple indexes at once, just like
89+
the search API:
9090

9191
[source,js]
9292
--------------------------------------------------
93-
POST twitter,blog/_docs,post/_delete_by_query
93+
POST twitter,blog/_delete_by_query
9494
{
9595
"query": {
9696
"match_all": {}

docs/reference/docs/index_.asciidoc

+2-2
Original file line numberDiff line numberDiff line change
@@ -229,14 +229,14 @@ The result of the above index operation is:
229229
},
230230
"_index" : "twitter",
231231
"_type" : "_doc",
232-
"_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32",
232+
"_id" : "W0tpsmIBdwcYyG50zbta",
233233
"_version" : 1,
234234
"_seq_no" : 0,
235235
"_primary_term" : 1,
236236
"result": "created"
237237
}
238238
--------------------------------------------------
239-
// TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/]
239+
// TESTRESPONSE[s/W0tpsmIBdwcYyG50zbta/$body._id/ s/"successful" : 2/"successful" : 1/]
240240

241241
[float]
242242
[[index-routing]]

docs/reference/docs/update-by-query.asciidoc

+4-4
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ Back to the API format, this will update tweets from the `twitter` index:
6767

6868
[source,js]
6969
--------------------------------------------------
70-
POST twitter/_doc/_update_by_query?conflicts=proceed
70+
POST twitter/_update_by_query?conflicts=proceed
7171
--------------------------------------------------
7272
// CONSOLE
7373
// TEST[setup:twitter]
@@ -145,12 +145,12 @@ This API doesn't allow you to move the documents it touches, just modify their
145145
source. This is intentional! We've made no provisions for removing the document
146146
from its original location.
147147

148-
It's also possible to do this whole thing on multiple indexes and multiple
149-
types at once, just like the search API:
148+
It's also possible to do this whole thing on multiple indexes at once, just
149+
like the search API:
150150

151151
[source,js]
152152
--------------------------------------------------
153-
POST twitter,blog/_doc,post/_update_by_query
153+
POST twitter,blog/_update_by_query
154154
--------------------------------------------------
155155
// CONSOLE
156156
// TEST[s/^/PUT twitter\nPUT blog\n/]

docs/reference/modules/cluster/misc.asciidoc

+1-1
Original file line numberDiff line numberDiff line change
@@ -82,4 +82,4 @@ Enable or disable allocation for persistent tasks:
8282
This setting does not affect the persistent tasks that are already being executed.
8383
Only newly created persistent tasks, or tasks that must be reassigned (after a node
8484
left the cluster, for example), are impacted by this setting.
85-
--
85+
--

docs/reference/modules/http.asciidoc

+7-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ http://en.wikipedia.org/wiki/Chunked_transfer_encoding[HTTP chunking].
2020

2121
The settings in the table below can be configured for HTTP. Note that none of
2222
them are dynamically updatable so for them to take effect they should be set in
23-
`elasticsearch.yml`.
23+
the Elasticsearch <<settings, configuration file>>.
2424

2525
[cols="<,<",options="header",]
2626
|=======================================================================
@@ -100,6 +100,12 @@ simple message will be returned. Defaults to `true`
100100

101101
|`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`.
102102

103+
|`http.max_warning_header_count` |The maximum number of warning headers in
104+
client HTTP responses, defaults to unbounded.
105+
106+
|`http.max_warning_header_size` |The maximum total size of warning headers in
107+
client HTTP responses, defaults to unbounded.
108+
103109
|=======================================================================
104110

105111
It also uses the common

docs/reference/search/search.asciidoc

+1-11
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,7 @@ that match the query. The query can either be provided using a simple
1212
All search APIs can be applied across multiple types within an index, and
1313
across multiple indices with support for the
1414
<<multi-index,multi index syntax>>. For
15-
example, we can search on all documents across all types within the
16-
twitter index:
15+
example, we can search on all documents within the twitter index:
1716

1817
[source,js]
1918
--------------------------------------------------
@@ -22,15 +21,6 @@ GET /twitter/_search?q=user:kimchy
2221
// CONSOLE
2322
// TEST[setup:twitter]
2423

25-
We can also search within specific types:
26-
27-
[source,js]
28-
--------------------------------------------------
29-
GET /twitter/tweet,user/_search?q=user:kimchy
30-
--------------------------------------------------
31-
// CONSOLE
32-
// TEST[setup:twitter]
33-
3424
We can also search all tweets with a certain tag across several indices
3525
(for example, when each user has his own index):
3626

docs/reference/setup/bootstrap-checks.asciidoc

+1-1
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ that the Elasticsearch process has the rights to create enough threads
114114
under normal use. This check is enforced only on Linux. If you are on
115115
Linux, to pass the maximum number of threads check, you must configure
116116
your system to allow the Elasticsearch process the ability to create at
117-
least 2048 threads. This can be done via `/etc/security/limits.conf`
117+
least 4096 threads. This can be done via `/etc/security/limits.conf`
118118
using the `nproc` setting (note that you might have to increase the
119119
limits for the `root` user too).
120120

server/src/main/java/org/elasticsearch/common/unit/TimeValue.java renamed to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java

+1-9
Original file line numberDiff line numberDiff line change
@@ -19,15 +19,12 @@
1919

2020
package org.elasticsearch.common.unit;
2121

22-
import org.elasticsearch.common.xcontent.ToXContentFragment;
23-
import org.elasticsearch.common.xcontent.XContentBuilder;
24-
2522
import java.io.IOException;
2623
import java.util.Locale;
2724
import java.util.Objects;
2825
import java.util.concurrent.TimeUnit;
2926

30-
public class TimeValue implements Comparable<TimeValue>, ToXContentFragment {
27+
public class TimeValue implements Comparable<TimeValue> {
3128

3229
/** How many nano-seconds in one milli-second */
3330
public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
@@ -352,9 +349,4 @@ public int compareTo(TimeValue timeValue) {
352349
double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1);
353350
return Double.compare(thisValue, otherValue);
354351
}
355-
356-
@Override
357-
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
358-
return builder.value(toString());
359-
}
360352
}

server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java renamed to libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java

-30
Original file line numberDiff line numberDiff line change
@@ -19,15 +19,10 @@
1919

2020
package org.elasticsearch.common.unit;
2121

22-
import org.elasticsearch.common.io.stream.BytesStreamOutput;
23-
import org.elasticsearch.common.io.stream.StreamInput;
2422
import org.elasticsearch.test.ESTestCase;
2523

26-
import java.io.IOException;
2724
import java.util.concurrent.TimeUnit;
2825

29-
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
30-
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
3126
import static org.hamcrest.CoreMatchers.instanceOf;
3227
import static org.hamcrest.CoreMatchers.not;
3328
import static org.hamcrest.Matchers.containsString;
@@ -154,31 +149,6 @@ private String randomTimeUnit() {
154149
return randomFrom("nanos", "micros", "ms", "s", "m", "h", "d");
155150
}
156151

157-
private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException {
158-
BytesStreamOutput out = new BytesStreamOutput();
159-
out.writeTimeValue(value);
160-
assertEquals(expectedSize, out.size());
161-
162-
StreamInput in = out.bytes().streamInput();
163-
TimeValue inValue = in.readTimeValue();
164-
165-
assertThat(inValue, equalTo(value));
166-
assertThat(inValue.duration(), equalTo(value.duration()));
167-
assertThat(inValue.timeUnit(), equalTo(value.timeUnit()));
168-
}
169-
170-
public void testSerialize() throws Exception {
171-
assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3);
172-
assertEqualityAfterSerialize(timeValueNanos(-1), 2);
173-
assertEqualityAfterSerialize(timeValueNanos(1), 2);
174-
assertEqualityAfterSerialize(timeValueSeconds(30), 2);
175-
176-
final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values()));
177-
BytesStreamOutput out = new BytesStreamOutput();
178-
out.writeZLong(timeValue.duration());
179-
assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length());
180-
}
181-
182152
public void testFailOnUnknownUnits() {
183153
try {
184154
TimeValue.parseTimeValue("23tw", null, "test");

0 commit comments

Comments (0)