
Commit 046118c

Merge branch 'master' into feature/aggregate-metrics
Parents: 9b77f77 and c24885d

122 files changed: 2611 additions, 820 deletions


.ci/bwcVersions

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,8 @@ BWC_VERSION:
 - "7.6.0"
 - "7.6.1"
 - "7.6.2"
+- "7.7.0"
+- "7.7.1"
 - "7.8.0"
 - "7.9.0"
 - "8.0.0"

build.gradle

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ task updateCIBwcVersions() {
     File yml = file(".ci/bwcVersions")
     yml.text = ""
     yml << "BWC_VERSION:\n"
-    versions.indexCompatible.each {
+    BuildParams.bwcVersions.indexCompatible.each {
       yml << " - \"$it\"\n"
     }
   }
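
The task above regenerates .ci/bwcVersions from the list of index-compatible versions, which is where the two new 7.7.x entries in the previous diff come from. Below is a minimal plain-Java sketch of that generation step; the class name WriteBwcVersions, the method writeBwcVersionsFile, and the sample version list are illustrative, not part of the build.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// A minimal sketch, not the Gradle task itself: rewrite .ci/bwcVersions from a
// list of index-compatible versions, matching the "BWC_VERSION:" YAML layout
// shown in the first diff.
public class WriteBwcVersions {
    static void writeBwcVersionsFile(Path yml, List<String> indexCompatible) throws IOException {
        StringBuilder sb = new StringBuilder("BWC_VERSION:\n");
        for (String version : indexCompatible) {
            sb.append("  - \"").append(version).append("\"\n");
        }
        Files.writeString(yml, sb.toString()); // overwrite, like yml.text = "" followed by appends
    }

    public static void main(String[] args) throws IOException {
        writeBwcVersionsFile(Path.of(".ci/bwcVersions"),
            List.of("7.6.2", "7.7.0", "7.7.1", "7.8.0", "7.9.0", "8.0.0")); // sample data only
    }
}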

buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ValidateJsonAgainstSchemaTask.java

Lines changed: 1 addition & 18 deletions
@@ -29,10 +29,8 @@
 import org.gradle.api.DefaultTask;
 import org.gradle.api.UncheckedIOException;
 import org.gradle.api.file.FileCollection;
-import org.gradle.api.tasks.Input;
 import org.gradle.api.tasks.InputFile;
 import org.gradle.api.tasks.InputFiles;
-import org.gradle.api.tasks.Optional;
 import org.gradle.api.tasks.OutputFile;
 import org.gradle.api.tasks.TaskAction;
 import org.gradle.work.ChangeType;
@@ -44,9 +42,7 @@
 import java.io.PrintWriter;
 import java.nio.file.Files;
 import java.nio.file.StandardOpenOption;
-import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.Map;
@@ -59,7 +55,6 @@
 public class ValidateJsonAgainstSchemaTask extends DefaultTask {
 
     private final ObjectMapper mapper = new ObjectMapper();
-    private Set<String> ignore = new HashSet<>();
     private File jsonSchema;
     private FileCollection inputFiles;
 
@@ -82,16 +77,6 @@ public void setJsonSchema(File jsonSchema) {
         this.jsonSchema = jsonSchema;
     }
 
-    @Input
-    @Optional
-    public Set<String> getIgnore() {
-        return ignore;
-    }
-
-    public void ignore(String... ignore) {
-        this.ignore.addAll(Arrays.asList(ignore));
-    }
-
     @OutputFile
     public File getReport() {
         return new File(getProject().getBuildDir(), "reports/validateJson.txt");
@@ -110,9 +95,7 @@ public void validate(InputChanges inputChanges) throws IOException {
             .filter(f -> f.getChangeType() != ChangeType.REMOVED)
             .forEach(fileChange -> {
                 File file = fileChange.getFile();
-                if (ignore.contains(file.getName())) {
-                    getLogger().debug("Ignoring file [{}] due to configuration", file.getName());
-                } else if (file.isDirectory() == false) {
+                if (file.isDirectory() == false) {
                     // validate all files and hold on to errors for a complete report if there are failures
                     getLogger().debug("Validating JSON [{}]", file.getName());
                     try {
buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/BwcVersions.java

Lines changed: 1 addition & 14 deletions
@@ -19,14 +19,12 @@
 package org.elasticsearch.gradle;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -89,7 +87,6 @@ public class BwcVersions {
     private static final Pattern LINE_PATTERN = Pattern.compile(
         "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*"
     );
-    private static final List<Version> IGNORED_VERSIONS = Arrays.asList(Version.fromString("7.7.0"));
 
     private final Version currentVersion;
     private final Map<Integer, List<Version>> groupByMajor;
@@ -123,7 +120,6 @@ protected BwcVersions(List<String> versionLines, Version currentVersionProperty)
                         Integer.parseInt(match.group(3))
                     )
                 )
-                .filter(v -> !IGNORED_VERSIONS.contains(v)) // remove any specifically ignored versions
                 .collect(Collectors.toCollection(TreeSet::new)),
             currentVersionProperty
         );
@@ -276,16 +272,7 @@ public List<Version> getUnreleased() {
             // we found that the previous minor is staged but not yet released
             // in this case, the minor before that has a bugfix, should there be such a minor
             if (greatestMinor >= 2) {
-                int major = groupByMinor.values()
-                    .stream()
-                    .flatMap(Collection::stream)
-                    .findFirst()
-                    .map(Version::getMajor)
-                    .orElseThrow(NoSuchElementException::new);
-                // Don't bother searching for a version we've ignored
-                if (IGNORED_VERSIONS.contains(new Version(major, greatestMinor - 2, 0)) == false) {
-                    unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
-                }
+                unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
             }
         }
     }
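
The LINE_PATTERN in this class is what turns each V_major_minor_revision constant declared in Version.java into a version entry; with IGNORED_VERSIONS gone, every matching line is kept. A small self-contained illustration of that parsing step follows; the class VersionLineParsing, its method names, and the sample input lines are made up for the example.

import java.util.List;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative only: parse lines like "public static final Version V_7_7_1 = ..."
// into major.minor.revision strings using the same regex as BwcVersions above.
public class VersionLineParsing {
    private static final Pattern LINE_PATTERN = Pattern.compile(
        "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*"
    );

    static Optional<String> parseVersion(String line) {
        Matcher match = LINE_PATTERN.matcher(line);
        if (match.matches() == false) {
            return Optional.empty(); // not a Version constant declaration
        }
        return Optional.of(match.group(1) + "." + match.group(2) + "." + match.group(3));
    }

    public static void main(String[] args) {
        List<String> lines = List.of(
            "    public static final Version V_7_7_1 = new Version(/* ... */);",
            "    public static final Version CURRENT = V_8_0_0;"
        );
        lines.forEach(l -> parseVersion(l).ifPresent(System.out::println)); // prints 7.7.1
    }
}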

client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java

Lines changed: 4 additions & 4 deletions
@@ -129,10 +129,10 @@
 import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler;
 import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler;
-import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantLongTerms;
-import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantStringTerms;
-import org.elasticsearch.search.aggregations.bucket.significant.SignificantLongTerms;
-import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.SignificantLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.SignificantStringTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;

client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.SignificantTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
 
 import java.io.IOException;

docs/reference/ilm/actions/ilm-allocate.asciidoc

Lines changed: 4 additions & 4 deletions
@@ -7,16 +7,16 @@ Phases allowed: warm, cold.
 Updates the index settings to change which nodes are allowed to host the index shards
 and change the number of replicas.
 
+The allocate action is not allowed in the hot phase.
+The initial allocation for the index must be done manually or via
+<<indices-templates, index templates>>.
+
 You can configure this action to modify both the allocation rules and number of replicas,
 only the allocation rules, or only the number of replicas.
-
 For more information about how {es} uses replicas for scaling, see
 <<scalability>>. See <<shard-allocation-filtering>> for more information about
 controlling where {es} allocates shards of a particular index.
 
-NOTE: The allocate action is not allowed in the hot phase.
-The initial allocation for the index must be done manually or via index templates.
-{ilm-init} doesn't handle index allocation during the hot phase.
 
 [[ilm-allocate-options]]
 ==== Options
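
Because the allocate action is only allowed in the warm and cold phases, a policy that uses it attaches the allocation rules there. Below is a hedged sketch that creates such a policy with the low-level Java REST client; the policy name warm_allocate_policy, the node attribute box_type, and the localhost endpoint are invented for illustration.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

// Illustrative sketch: create an ILM policy whose warm phase uses the allocate
// action to move shards to nodes tagged box_type=warm and drop to one replica.
public class PutAllocatePolicy {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_ilm/policy/warm_allocate_policy");
            request.setJsonEntity(
                "{\n" +
                "  \"policy\": {\n" +
                "    \"phases\": {\n" +
                "      \"warm\": {\n" +
                "        \"min_age\": \"7d\",\n" +
                "        \"actions\": {\n" +
                "          \"allocate\": {\n" +
                "            \"number_of_replicas\": 1,\n" +
                "            \"require\": { \"box_type\": \"warm\" }\n" +
                "          }\n" +
                "        }\n" +
                "      }\n" +
                "    }\n" +
                "  }\n" +
                "}");
            client.performRequest(request);
        }
    }
}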

docs/reference/ilm/ilm-index-lifecycle.asciidoc

Lines changed: 9 additions & 0 deletions
@@ -36,6 +36,15 @@ the index must be older than the minimum age of the next phase.
 The minimum age defaults to zero, which causes {ilm-init} to move indices to the next phase
 as soon as all actions in the current phase complete.
 
+If an index has unallocated shards and the <<cluster-health,cluster health status>> is yellow,
+the index can still transition to the next phase according to its {ilm} policy.
+However, because {es} can only perform certain clean up tasks on a green
+cluster, there might be unexpected side effects.
+
+To avoid increased disk usage and reliability issues,
+address any cluster health problems in a timely fashion.
+
+
 [discrete]
 [[ilm-phase-execution]]
 === Phase execution

docs/reference/ilm/ilm-tutorial.asciidoc

Lines changed: 7 additions & 6 deletions
@@ -10,8 +10,7 @@
 This tutorial demonstrates how to use {ilm}
 ({ilm-init}) to manage indices that contain time-series data.
 
-When you continuously index timestamped documents into {es} using
-Filebeat, Logstash, or some other mechanism,
+When you continuously index timestamped documents into {es},
 you typically use an index alias so you can periodically roll over to a new index.
 This enables you to implement a hot-warm-cold architecture to meet your performance
 requirements for your newest data, control costs over time, enforce retention policies,
@@ -28,10 +27,12 @@ as expected.
 
 For an introduction to rolling indices, see <<index-rollover>>.
 
-NOTE: {filebeat} includes a default {ilm-init} policy that initiates the rollover action when
-the index size reaches 50GB or becomes 30 days old.
-You can use this policy as a starting point, or replace it with a custom policy.
-See {kibana-ref}/example-using-index-lifecycle-policy.html[Use {ilm-init} to manage Filebeat time-based indices].
+IMPORTANT: When you enable {ilm} for {beats} or the {ls} {es} output plugin,
+lifecycle policies are set up automatically.
+You do not need to bootstrap the initial index or take any other actions.
+You can modify the default policies through
+{kibana-ref}/example-using-index-lifecycle-policy.html[{kib} Management]
+or the {ilm-init} APIs.
 
 
 [discrete]

docs/reference/ilm/index.asciidoc

Lines changed: 4 additions & 1 deletion
@@ -13,6 +13,10 @@ For example, you could use {ilm-init} to:
 * Create a new index each day, week, or month and archive previous ones
 * Delete stale indices to enforce data retention standards
 
+When you enable {ilm} for {beats} or the {ls} {es} output plugin,
+{ilm-init} is configured automatically.
+You can modify the default policies through {kib} Management or the {ilm-init} APIs.
+
 [TIP]
 To automatically back up your indices and manage snapshots,
 use <<getting-started-snapshot-lifecycle-management,snapshot lifecycle policies>>.
@@ -48,4 +52,3 @@ include::using-policies-rollover.asciidoc[]
 include::ilm-with-existing-indices.asciidoc[]
 
 include::ilm-and-snapshots.asciidoc[]
-
docs/reference/ilm/set-up-lifecycle-policy.asciidoc

Lines changed: 6 additions & 0 deletions
@@ -12,6 +12,12 @@ you create the policy and add it to the index template.
 To use a policy to manage an index that doesn't roll over,
 you can specify the policy directly when you create it.
 
+IMPORTANT: When you enable {ilm} for {beats} or the {ls} {es} output plugin,
+the necessary policies and configuration changes are applied automatically.
+You can modify the default policies, but you do not need to explicitly configure a policy or
+bootstrap an initial index.
+
+
 [discrete]
 [[ilm-create-policy]]
 === Create lifecycle policy
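
For the non-rollover case mentioned above, the policy can be attached at index creation time through the index.lifecycle.name setting. A hedged sketch with the low-level Java REST client follows; the index name, policy name, and endpoint are placeholders.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

// Illustrative sketch: apply a lifecycle policy to a non-rolling index by setting
// index.lifecycle.name when the index is created.
public class CreateIndexWithPolicy {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/my-index-000001");
            request.setJsonEntity(
                "{\n" +
                "  \"settings\": {\n" +
                "    \"index.lifecycle.name\": \"my_policy\"\n" +
                "  }\n" +
                "}");
            client.performRequest(request);
        }
    }
}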

docs/reference/indices/analyze.asciidoc

Lines changed: 4 additions & 8 deletions
@@ -44,7 +44,7 @@ the `analyzer` or `<field>` parameter overrides this value.
 If no analyzer or field are specified,
 the analyze API uses the default analyzer for the index.
 
-If no index is specified 
+If no index is specified
 or the index does not have a default analyzer,
 the analyze API uses the <<analysis-standard-analyzer,standard analyzer>>.
 --
@@ -56,11 +56,9 @@ the analyze API uses the <<analysis-standard-analyzer,standard analyzer>>.
 `analyzer`::
 +
 --
-(Optional, string or <<analysis-custom-analyzer,custom analyzer object>>)
-Analyzer used to analyze for the provided `text`.
-
-See <<analysis-analyzers>> for a list of built-in analyzers.
-You can also provide a <<analysis-custom-analyzer,custom analyzer>>.
+(Optional, string)
+The name of the analyzer that should be applied to the provided `text`. This could be a
+<<analysis-analyzers, built-in analyzer>>, or an analyzer that's been configured in the index.
 
 If this parameter is not specified,
 the analyze API uses the analyzer defined in the field's mapping.
@@ -187,8 +185,6 @@ GET /_analyze
 }
 --------------------------------------------------
 
-deprecated[5.0.0, Use `filter`/`char_filter` instead of `filters`/`char_filters` and `token_filters` has been removed]
-
 Custom tokenizers, token filters, and character filters can be specified in the request body as follows:
 
 [source,console]
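
With the reworded description, the `analyzer` parameter is simply the name of a built-in analyzer or one configured in the index. A hedged sketch of such a request with the low-level Java REST client follows; the endpoint and sample text are arbitrary.

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Illustrative sketch: call the analyze API with a named built-in analyzer.
public class AnalyzeByName {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/_analyze");
            request.setJsonEntity("{ \"analyzer\": \"standard\", \"text\": \"Quick Brown Foxes!\" }");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity())); // token list as JSON
        }
    }
}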

modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java

Lines changed: 1 addition & 1 deletion
@@ -275,7 +275,7 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower
             }
             hi = Math.round(Math.floor(dValue));
         }
-        Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues());
+        Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues(), context);
         if (boost() != 1f) {
             query = new BoostQuery(query, boost());
         }

modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java

Lines changed: 15 additions & 15 deletions
@@ -117,8 +117,8 @@ public void testRangeQuery() throws IOException {
             Double u = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000;
             boolean includeLower = randomBoolean();
             boolean includeUpper = randomBoolean();
-            Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false);
-            Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, null);
+            Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, MOCK_QSC);
+            Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, MOCK_QSC);
             assertEquals(searcher.count(doubleQ), searcher.count(scaledFloatQ));
         }
         IOUtils.close(reader, dir);
@@ -128,37 +128,37 @@ public void testRoundsUpperBoundCorrectly() {
         ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
         ft.setName("scaled_float");
         ft.setScalingFactor(100.0);
-        Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, null);
+        Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, null);
+        scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, null);
+        scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, null);
+        scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, null);
+        scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, null);
+        scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(null, 79.99, true, true, null);
+        scaledFloatQ = ft.rangeQuery(null, 79.99, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-9223372036854775808 TO 7999]", scaledFloatQ.toString());
     }
 
     public void testRoundsLowerBoundCorrectly() {
         ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
         ft.setName("scaled_float");
         ft.setScalingFactor(100.0);
-        Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, null);
+        Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, MOCK_QSC);
         assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, null);
+        scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, null);
+        scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, MOCK_QSC);
         assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, null);
+        scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, null);
+        scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, MOCK_QSC);
         assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
-        scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, null);
+        scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, MOCK_QSC);
         assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
     }
 
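
The assertions above pin down how scaled_float range bounds map onto the underlying long values: each bound is multiplied by the scaling factor, an inclusive upper bound then takes the floor, an exclusive one takes the largest long strictly below the scaled value, and lower bounds behave symmetrically. The standalone arithmetic sketch below (not the mapper's code; class and method names are illustrative) reproduces the expected values for a scaling factor of 100.

// Standalone arithmetic sketch, not the mapper's implementation: reproduce the
// bound rounding that the assertions above expect for a scaling factor of 100.
public class ScaledFloatBounds {
    // largest long value that still satisfies the upper bound
    static long upperBound(double value, double scalingFactor, boolean includeUpper) {
        double scaled = value * scalingFactor;
        return includeUpper ? (long) Math.floor(scaled) : (long) Math.ceil(scaled) - 1;
    }

    // smallest long value that still satisfies the lower bound
    static long lowerBound(double value, double scalingFactor, boolean includeLower) {
        double scaled = value * scalingFactor;
        return includeLower ? (long) Math.ceil(scaled) : (long) Math.floor(scaled) + 1;
    }

    public static void main(String[] args) {
        System.out.println(upperBound(0.1, 100.0, false));   // 9
        System.out.println(upperBound(0.1, 100.0, true));    // 10
        System.out.println(upperBound(0.095, 100.0, true));  // 9
        System.out.println(upperBound(79.99, 100.0, true));  // 7999
        System.out.println(lowerBound(-0.1, 100.0, false));  // -9
        System.out.println(lowerBound(-0.105, 100.0, true)); // -10
    }
}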
