Skip to content

Commit 4fb6f0a

Browse files
committed
Merge branch '6.2' of github.com:elastic/elasticsearch into 6.2
2 parents 725d0b5 + e79d579 commit 4fb6f0a

File tree

27 files changed

+381
-83
lines changed

27 files changed

+381
-83
lines changed

buildSrc/version.properties

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
elasticsearch = 6.2.4
1+
elasticsearch = 6.2.5
22
lucene = 7.2.1
33

44
# optional dependencies

distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -707,10 +707,13 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot,
707707
final PluginInfo info = loadPluginInfo(terminal, tmpRoot, isBatch, env);
708708
// read optional security policy (extra permissions), if it exists, confirm or warn the user
709709
Path policy = tmpRoot.resolve(PluginInfo.ES_PLUGIN_POLICY);
710+
final Set<String> permissions;
710711
if (Files.exists(policy)) {
711-
Set<String> permissions = PluginSecurity.parsePermissions(policy, env.tmpFile());
712-
PluginSecurity.confirmPolicyExceptions(terminal, permissions, info.hasNativeController(), isBatch);
712+
permissions = PluginSecurity.parsePermissions(policy, env.tmpFile());
713+
} else {
714+
permissions = Collections.emptySet();
713715
}
716+
PluginSecurity.confirmPolicyExceptions(terminal, permissions, info.hasNativeController(), isBatch);
714717

715718
final Path destination = env.pluginsFile().resolve(info.getName());
716719
deleteOnFailure.add(destination);

distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java

Lines changed: 103 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1191,6 +1191,59 @@ private Function<byte[], String> checksumAndString(final MessageDigest digest, f
11911191
return bytes -> MessageDigests.toHexString(digest.digest(bytes)) + s;
11921192
}
11931193

1194+
// checks the plugin requires a policy confirmation, and does not install when that is rejected by the user
1195+
// the plugin is installed after this method completes
1196+
private void assertPolicyConfirmation(Tuple<Path, Environment> env, String pluginZip, String... warnings) throws Exception {
1197+
for (int i = 0; i < warnings.length; ++i) {
1198+
String warning = warnings[i];
1199+
for (int j = 0; j < i; ++j) {
1200+
terminal.addTextInput("y"); // accept warnings we have already tested
1201+
}
1202+
// default answer, does not install
1203+
terminal.addTextInput("");
1204+
UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
1205+
assertEquals("installation aborted by user", e.getMessage());
1206+
1207+
assertThat(terminal.getOutput(), containsString("WARNING: " + warning));
1208+
try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
1209+
assertThat(fileStream.collect(Collectors.toList()), empty());
1210+
}
1211+
1212+
// explicitly do not install
1213+
terminal.reset();
1214+
for (int j = 0; j < i; ++j) {
1215+
terminal.addTextInput("y"); // accept warnings we have already tested
1216+
}
1217+
terminal.addTextInput("n");
1218+
e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
1219+
assertEquals("installation aborted by user", e.getMessage());
1220+
assertThat(terminal.getOutput(), containsString("WARNING: " + warning));
1221+
try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
1222+
assertThat(fileStream.collect(Collectors.toList()), empty());
1223+
}
1224+
}
1225+
1226+
// allow installation
1227+
terminal.reset();
1228+
for (int j = 0; j < warnings.length; ++j) {
1229+
terminal.addTextInput("y");
1230+
}
1231+
installPlugin(pluginZip, env.v1());
1232+
for (String warning : warnings) {
1233+
assertThat(terminal.getOutput(), containsString("WARNING: " + warning));
1234+
}
1235+
}
1236+
1237+
public void testPolicyConfirmation() throws Exception {
1238+
Tuple<Path, Environment> env = createEnv(fs, temp);
1239+
Path pluginDir = createPluginDir(temp);
1240+
writePluginSecurityPolicy(pluginDir, "setAccessible", "setFactory");
1241+
String pluginZip = createPluginUrl("fake", pluginDir);
1242+
1243+
assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions");
1244+
assertPlugin("fake", pluginDir, env.v2());
1245+
}
1246+
11941247
public void testMetaPluginPolicyConfirmation() throws Exception {
11951248
Tuple<Path, Environment> env = createEnv(fs, temp);
11961249
Path metaDir = createPluginDir(temp);
@@ -1204,32 +1257,60 @@ public void testMetaPluginPolicyConfirmation() throws Exception {
12041257
writePlugin("fake2", fake2Dir);
12051258
String pluginZip = createMetaPluginUrl("meta-plugin", metaDir);
12061259

1207-
// default answer, does not install
1208-
terminal.addTextInput("");
1209-
UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
1210-
assertEquals("installation aborted by user", e.getMessage());
1211-
assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
1212-
try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
1213-
assertThat(fileStream.collect(Collectors.toList()), empty());
1214-
}
1260+
assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions");
1261+
assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2());
1262+
assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2());
1263+
}
12151264

1216-
// explicitly do not install
1217-
terminal.reset();
1218-
terminal.addTextInput("n");
1219-
e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1()));
1220-
assertEquals("installation aborted by user", e.getMessage());
1221-
assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
1222-
try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) {
1223-
assertThat(fileStream.collect(Collectors.toList()), empty());
1224-
}
1265+
public void testNativeControllerConfirmation() throws Exception {
1266+
Tuple<Path, Environment> env = createEnv(fs, temp);
1267+
Path pluginDir = createPluginDir(temp);
1268+
String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true");
12251269

1226-
// allow installation
1227-
terminal.reset();
1228-
terminal.addTextInput("y");
1229-
installPlugin(pluginZip, env.v1());
1230-
assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions"));
1270+
assertPolicyConfirmation(env, pluginZip, "plugin forks a native controller");
1271+
assertPlugin("fake", pluginDir, env.v2());
1272+
}
1273+
1274+
public void testMetaPluginNativeControllerConfirmation() throws Exception {
1275+
Tuple<Path, Environment> env = createEnv(fs, temp);
1276+
Path metaDir = createPluginDir(temp);
1277+
Path fake1Dir = metaDir.resolve("fake1");
1278+
Files.createDirectory(fake1Dir);
1279+
writePlugin("fake1", fake1Dir, "has.native.controller", "true");
1280+
Path fake2Dir = metaDir.resolve("fake2");
1281+
Files.createDirectory(fake2Dir);
1282+
writePlugin("fake2", fake2Dir);
1283+
String pluginZip = createMetaPluginUrl("meta-plugin", metaDir);
1284+
1285+
assertPolicyConfirmation(env, pluginZip, "plugin forks a native controller");
12311286
assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2());
12321287
assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2());
12331288
}
12341289

1290+
public void testNativeControllerAndPolicyConfirmation() throws Exception {
1291+
Tuple<Path, Environment> env = createEnv(fs, temp);
1292+
Path pluginDir = createPluginDir(temp);
1293+
writePluginSecurityPolicy(pluginDir, "setAccessible", "setFactory");
1294+
String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true");
1295+
1296+
assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions", "plugin forks a native controller");
1297+
assertPlugin("fake", pluginDir, env.v2());
1298+
}
1299+
1300+
public void testMetaPluginNativeControllerAndPolicyConfirmation() throws Exception {
1301+
Tuple<Path, Environment> env = createEnv(fs, temp);
1302+
Path metaDir = createPluginDir(temp);
1303+
Path fake1Dir = metaDir.resolve("fake1");
1304+
Files.createDirectory(fake1Dir);
1305+
writePluginSecurityPolicy(fake1Dir, "setAccessible", "setFactory");
1306+
writePlugin("fake1", fake1Dir);
1307+
Path fake2Dir = metaDir.resolve("fake2");
1308+
Files.createDirectory(fake2Dir);
1309+
writePlugin("fake2", fake2Dir, "has.native.controller", "true");
1310+
String pluginZip = createMetaPluginUrl("meta-plugin", metaDir);
1311+
1312+
assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions", "plugin forks a native controller");
1313+
assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2());
1314+
assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2());
1315+
}
12351316
}

docs/Versions.asciidoc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
:version: 6.2.3
1+
:version: 6.2.4
22
:major-version: 6.x
33
:lucene_version: 7.2.1
44
:lucene_version_path: 7_2_0

docs/plugins/analysis.asciidoc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@ A number of analysis plugins have been contributed by our community:
5353
* https://github.com/duydo/elasticsearch-analysis-vietnamese[Vietnamese Analysis Plugin] (by Duy Do)
5454
* https://github.com/ofir123/elasticsearch-network-analysis[Network Addresses Analysis Plugin] (by Ofir123)
5555
* https://github.com/medcl/elasticsearch-analysis-string2int[String2Integer Analysis Plugin] (by Medcl)
56+
* https://github.com/ZarHenry96/elasticsearch-dandelion-plugin[Dandelion Analysis Plugin] (by ZarHenry96)
5657

5758
include::analysis-icu.asciidoc[]
5859

docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,13 @@ POST /sales/_search?size=0
2727
// CONSOLE
2828
// TEST[setup:sales]
2929

30-
Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second`
30+
Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`),
31+
`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`)
3132

3233
Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing.
3334
Note that fractional time values are not supported, but you can address this by shifting to another
34-
time unit (e.g., `1.5h` could instead be specified as `90m`).
35+
time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger than
36+
days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not).
3537

3638
[source,js]
3739
--------------------------------------------------

docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@ A `single-value` metrics aggregation that calculates an approximate count of
55
distinct values. Values can be extracted either from specific fields in the
66
document or generated by a script.
77

8-
Assume you are indexing books and would like to count the unique authors that
9-
match a query:
8+
Assume you are indexing store sales and would like to count the unique number of sold products that match a query:
109

1110
[source,js]
1211
--------------------------------------------------

docs/reference/cluster/tasks.asciidoc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ It is also possible to retrieve information for a particular task:
6464

6565
[source,js]
6666
--------------------------------------------------
67-
GET _tasks/task_id:1 <1>
67+
GET _tasks/node_id:1 <1>
6868
--------------------------------------------------
6969
// CONSOLE
7070
// TEST[catch:missing]

docs/reference/docs/delete-by-query.asciidoc

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -284,9 +284,12 @@ executed again in order to conform to `requests_per_second`.
284284

285285
`failures`::
286286

287-
Array of all indexing failures. If this is non-empty then the request aborted
288-
because of those failures. See `conflicts` for how to prevent version conflicts
289-
from aborting the operation.
287+
Array of failures if there were any unrecoverable errors during the process. If
288+
this is non-empty then the request aborted because of those failures.
289+
Delete-by-query is implemented using batches and any failure causes the entire
290+
process to abort but all failures in the current batch are collected into the
291+
array. You can use the `conflicts` option to prevent delete-by-query from aborting on
292+
version conflicts.
290293

291294

292295
[float]

docs/reference/docs/delete.asciidoc

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -39,11 +39,14 @@ The result of the above delete operation is:
3939
[[delete-versioning]]
4040
=== Versioning
4141

42-
Each document indexed is versioned. When deleting a document, the
43-
`version` can be specified to make sure the relevant document we are
44-
trying to delete is actually being deleted and it has not changed in the
45-
meantime. Every write operation executed on a document, deletes included,
46-
causes its version to be incremented.
42+
Each document indexed is versioned. When deleting a document, the `version` can
43+
be specified to make sure the relevant document we are trying to delete is
44+
actually being deleted and it has not changed in the meantime. Every write
45+
operation executed on a document, deletes included, causes its version to be
46+
incremented. The version number of a deleted document remains available for a
47+
short time after deletion to allow for control of concurrent operations. The
48+
length of time for which a deleted document's version remains available is
49+
determined by the `index.gc_deletes` index setting and defaults to 60 seconds.
4750

4851
[float]
4952
[[delete-routing]]

docs/reference/docs/index_.asciidoc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -229,14 +229,14 @@ The result of the above index operation is:
229229
},
230230
"_index" : "twitter",
231231
"_type" : "_doc",
232-
"_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32",
232+
"_id" : "W0tpsmIBdwcYyG50zbta",
233233
"_version" : 1,
234234
"_seq_no" : 0,
235235
"_primary_term" : 1,
236236
"result": "created"
237237
}
238238
--------------------------------------------------
239-
// TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/]
239+
// TESTRESPONSE[s/W0tpsmIBdwcYyG50zbta/$body._id/ s/"successful" : 2/"successful" : 1/]
240240

241241
[float]
242242
[[index-routing]]

docs/reference/docs/reindex.asciidoc

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -161,12 +161,12 @@ POST _reindex
161161

162162
`index` and `type` in `source` can both be lists, allowing you to copy from
163163
lots of sources in one request. This will copy documents from the `_doc` and
164-
`post` types in the `twitter` and `blog` index. The copied documents would include the
165-
`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
164+
`post` types in the `twitter` and `blog` index. The copied documents would include the
165+
`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
166166
specific parameters, you can use `query`.
167167

168-
The Reindex API makes no effort to handle ID collisions. For such issues, the target index
169-
will remain valid, but it's not easy to predict which document will survive because
168+
The Reindex API makes no effort to handle ID collisions. For such issues, the target index
169+
will remain valid, but it's not easy to predict which document will survive because
170170
the iteration order isn't well defined.
171171

172172
[source,js]
@@ -666,9 +666,11 @@ executed again in order to conform to `requests_per_second`.
666666

667667
`failures`::
668668

669-
Array of all indexing failures. If this is non-empty then the request aborted
670-
because of those failures. See `conflicts` for how to prevent version conflicts
671-
from aborting the operation.
669+
Array of failures if there were any unrecoverable errors during the process. If
670+
this is non-empty then the request aborted because of those failures. Reindex
671+
is implemented using batches and any failure causes the entire process to abort
672+
but all failures in the current batch are collected into the array. You can use
673+
the `conflicts` option to prevent reindex from aborting on version conflicts.
672674

673675
[float]
674676
[[docs-reindex-task-api]]
@@ -1004,7 +1006,7 @@ number for most indices. If slicing manually or otherwise tuning
10041006
automatic slicing, use these guidelines.
10051007

10061008
Query performance is most efficient when the number of `slices` is equal to the
1007-
number of shards in the index. If that number is large (e.g. 500),
1009+
number of shards in the index. If that number is large (e.g. 500),
10081010
choose a lower number as too many `slices` will hurt performance. Setting
10091011
`slices` higher than the number of shards generally does not improve efficiency
10101012
and adds overhead.
@@ -1018,7 +1020,7 @@ documents being reindexed and cluster resources.
10181020
[float]
10191021
=== Reindex daily indices
10201022

1021-
You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
1023+
You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
10221024
to reindex daily indices to apply a new template to the existing documents.
10231025

10241026
Assuming you have indices consisting of documents as follows:

docs/reference/docs/update-by-query.asciidoc

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -338,9 +338,13 @@ executed again in order to conform to `requests_per_second`.
338338

339339
`failures`::
340340

341-
Array of all indexing failures. If this is non-empty then the request aborted
342-
because of those failures. See `conflicts` for how to prevent version conflicts
343-
from aborting the operation.
341+
Array of failures if there were any unrecoverable errors during the process. If
342+
this is non-empty then the request aborted because of those failures.
343+
Update-by-query is implemented using batches and any failure causes the entire
344+
process to abort but all failures in the current batch are collected into the
345+
array. You can use the `conflicts` option to prevent update-by-query from aborting on
346+
version conflicts.
347+
344348

345349

346350
[float]

docs/reference/glossary.asciidoc

Lines changed: 22 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,15 @@
6161
`object`. The mapping also allows you to define (amongst other things)
6262
how the value for a field should be analyzed.
6363

64+
[[glossary-filter]] filter ::
65+
66+
A filter is a non-scoring <<glossary-query,query>>, meaning that it does not score documents.
67+
It is only concerned about answering the question - "Does this document match?".
68+
The answer is always a simple, binary yes or no. This kind of query is said to be made
69+
in a <<query-filter-context,filter context>>,
70+
hence it is called a filter. Filters are simple checks for set inclusion or exclusion.
71+
In most cases, the goal of filtering is to reduce the number of documents that have to be examined.
72+
6473
[[glossary-index]] index ::
6574

6675
An index is like a _table_ in a relational database. It has a
@@ -105,6 +114,16 @@
105114
+
106115
See also <<glossary-routing,routing>>
107116

117+
[[glossary-query]] query ::
118+
119+
A query is the basic component of a search. A search can be defined by one or more queries
120+
which can be mixed and matched in endless combinations. While <<glossary-filter,filters>> are
121+
queries that only determine if a document matches, those queries that also calculate how well
122+
the document matches are known as "scoring queries". Those queries assign it a score, which is
123+
later used to sort matched documents. Scoring queries take more resources than <<glossary-filter,non scoring queries>>
124+
and their query results are not cacheable. As a general rule, use query clauses for full-text
125+
search or for any condition that requires scoring, and use filters for everything else.
126+
108127
[[glossary-replica-shard]] replica shard ::
109128

110129
Each <<glossary-primary-shard,primary shard>> can have zero or more
@@ -161,8 +180,9 @@
161180

162181
A term is an exact value that is indexed in Elasticsearch. The terms
163182
`foo`, `Foo`, `FOO` are NOT equivalent. Terms (i.e. exact values) can
164-
be searched for using _term_ queries. +
165-
See also <<glossary-text,text>> and <<glossary-analysis,analysis>>.
183+
be searched for using _term_ queries.
184+
+
185+
See also <<glossary-text,text>> and <<glossary-analysis,analysis>>.
166186

167187
[[glossary-text]] text ::
168188

docs/reference/how-to/search-speed.asciidoc

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -377,3 +377,19 @@ criteria called <<search-adaptive-replica,adaptive replica selection>> to select
377377
the best copy of the data based on response time, service time, and queue size
378378
of the node containing each copy of the shard. This can improve query throughput
379379
and reduce latency for search-heavy applications.
380+
381+
=== Tune your queries with the Profile API
382+
383+
You can also analyse how expensive each component of your queries and
384+
aggregations are using the {ref}/search-profile.html[Profile API]. This might
385+
allow you to tune your queries to be less expensive, resulting in a positive
386+
performance result and reduced load. Also note that Profile API payloads can be
387+
easily visualised for better readability in the
388+
{kibana-ref}/xpack-profiler.html[Search Profiler], which is a Kibana dev tools
389+
UI available in all X-Pack licenses, including the free X-Pack Basic license.
390+
391+
Some caveats to the Profile API are that:
392+
393+
- the Profile API as a debugging tool adds significant overhead to search execution and can also have a very verbose output
394+
- given the added overhead, the resulting took times are not reliable indicators of actual took time, but can be used comparatively between clauses for relative timing differences
395+
- the Profile API is best for exploring possible reasons behind the most costly clauses of a query but isn't intended for accurately measuring absolute timings of each clause

0 commit comments

Comments
 (0)