diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f1f78910e9a4d..b8f4262ca5ce8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -209,6 +209,14 @@ Before submitting your changes, run the test suite to make sure that nothing is ./gradlew check ``` +If your changes affect only the documentation, run: + +```sh +./gradlew -p docs check +``` +For more information about testing code examples in the documentation, see +https://github.com/elastic/elasticsearch/blob/master/docs/README.asciidoc +  ### Project layout  This repository is split into many top level directories. The most important
diff --git a/buildSrc/version.properties b/buildSrc/version.properties index c98e265792b5b..6009021da14ed 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-cc2ee23050 +lucene = 7.4.0-snapshot-1cbadda4d3 # optional dependencies spatial4j = 0.7
diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java index d2aee2251a67b..c9ab38fe35545 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/transport/TransportClientBenchmark.java @@ -79,7 +79,7 @@ private static final class TransportBulkRequestExecutor implements BulkRequestEx @Override public boolean bulkIndex(List<String> bulkData) { - NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client); + NoopBulkRequestBuilder builder = new NoopBulkRequestBuilder(client, NoopBulkAction.INSTANCE); for (String bulkItem : bulkData) { builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8), XContentType.JSON)); } @@ -108,7 +108,7 @@ private TransportSearchRequestExecutor(TransportClient client, String indexName) @Override public boolean search(String source) { final SearchResponse response; - NoopSearchRequestBuilder builder = NoopSearchAction.INSTANCE.newRequestBuilder(client); + NoopSearchRequestBuilder builder = new NoopSearchRequestBuilder(client, NoopSearchAction.INSTANCE); try { builder.setIndices(indexName); builder.setQuery(QueryBuilders.wrapperQuery(source));
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java index 7f5ec6edd8e49..73678b2f5ea65 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java @@ -21,9 +21,8 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.client.ElasticsearchClient; -public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRequestBuilder> { +public class NoopBulkAction extends Action<BulkResponse> { public static final String NAME = "mock:data/write/bulk"; public static final NoopBulkAction INSTANCE = new NoopBulkAction(); @@ -32,11 +31,6 @@ private NoopBulkAction() { super(NAME); } - @Override - public NoopBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new NoopBulkRequestBuilder(client, this); - } -
@Override public BulkResponse newResponse() { return new BulkResponse(null, 0);
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java index 468152a88df3b..af97921bc6007 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java @@ -35,7 +35,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; -public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, NoopBulkRequestBuilder> +public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse> implements WriteRequestBuilder<NoopBulkRequestBuilder> { public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java index b24190b6946d0..ca2c3d9adfc41 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java @@ -21,9 +21,8 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.ElasticsearchClient; -public class NoopSearchAction extends Action<SearchRequest, SearchResponse, NoopSearchRequestBuilder> { +public class NoopSearchAction extends Action<SearchResponse> { public static final NoopSearchAction INSTANCE = new NoopSearchAction(); public static final String NAME = "mock:data/read/search"; @@ -31,11 +30,6 @@ public NoopSearchAction() { super(NAME); } - @Override - public NoopSearchRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new NoopSearchRequestBuilder(client, this); - } - @Override public SearchResponse newResponse() { return new SearchResponse();
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java index acba314f926b8..e73edb143e0d0 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java @@ -42,7 +42,7 @@ import java.util.Arrays; import java.util.List; -public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, NoopSearchRequestBuilder> { +public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse> { public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) { super(client, action, new SearchRequest());
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index 0f9e9e582263c..846f29bfb6ef9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -21,8 +21,6 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; @@ -68,28 +66,6 @@ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsR ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers); } - /** - * Get current tasks using the Task Management API - * <p> - * See - * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a> - */ - public ListTasksResponse listTasks(ListTasksRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, - emptySet(), headers); - } - - /** - * Asynchronously get current tasks using the Task Management API - * <p> - * See - * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a> - */ - public void listTasksAsync(ListTasksRequest request, ActionListener<ListTasksResponse> listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, - listener, emptySet(), headers); - } - /** * Add a pipeline or update an existing pipeline in the cluster * <p>
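The `listTasks`/`listTasksAsync` methods removed from `ClusterClient` above are not dropped; they reappear as `list`/`listAsync` on the new `TasksClient` added later in this diff, reachable through `RestHighLevelClient#tasks()`. A minimal migration sketch based on the methods shown in this diff — the host, port, and printed field are illustrative assumptions, not part of the change:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class ListTasksExample {
    public static void main(String[] args) throws Exception {
        // Assumed connection settings; adjust for your cluster.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            ListTasksRequest request = new ListTasksRequest();
            // Before this change: client.cluster().listTasks(request);
            // After this change, the same call lives on the tasks client:
            ListTasksResponse response = client.tasks().list(request);
            response.getTasks().forEach(task -> System.out.println(task.getAction()));
        }
    }
}
```

The `Header...` variants shown here still work, but the `RequestOptions` overloads introduced in `RestHighLevelClient` further down are the forward-looking signatures.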
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 5aa64a5c1375e..b08b045d287c0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -34,6 +34,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; @@ -269,6 +270,28 @@ public void flushAsync(FlushRequest flushRequest, ActionListener<FlushResponse> listener, emptySet(), headers); } + /** Initiate a synced flush manually using the synced flush API + * <p> + * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html"> Synced flush API on elastic.co</a> + */ + public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, + SyncedFlushResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously initiate a synced flush manually using the synced flush API + * <p> + * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html"> Synced flush API on elastic.co</a> + */ + public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, ActionListener<SyncedFlushResponse> listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, + SyncedFlushResponse::fromXContent, listener, emptySet(), headers); + } + + /** * Retrieve the settings of one or more indices * <p>
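Taken together, the `IndicesClient#flushSynced` methods above, the `RequestConverters#flushSynced` converter below, and the new client-side `SyncedFlushResponse` give the high-level client a synced-flush entry point. A short usage sketch built only from the API added in this diff — the index name "index" and connection settings are illustrative assumptions:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.SyncedFlushResponse;

public class SyncedFlushExample {
    public static void main(String[] args) throws Exception {
        // Assumed connection settings; adjust for your cluster.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // POSTs /index/_flush/synced and parses the per-index shard counts.
            SyncedFlushResponse response = client.indices().flushSynced(new SyncedFlushRequest("index"));
            System.out.println("total=" + response.totalShards()
                    + " successful=" + response.successfulShards()
                    + " failed=" + response.failedShards());
            // Each failed shard copy carries a shard id, a reason, and optional routing details.
            response.getIndexResults().forEach((index, result) ->
                    result.failures().forEach(failure ->
                            System.out.println(index + "[" + failure.getShardId() + "]: "
                                    + failure.getFailureReason())));
        }
    }
}
```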
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 6126d59b16a71..435381774b0c3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; @@ -41,6 +42,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -211,6 +213,14 @@ static Request flush(FlushRequest flushRequest) { return request; } + static Request flushSynced(SyncedFlushRequest syncedFlushRequest) { + String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush/synced")); + Params parameters = new Params(request); + parameters.withIndicesOptions(syncedFlushRequest.indicesOptions()); + return request; + } + static Request forceMerge(ForceMergeRequest forceMergeRequest) { String[] indices = forceMergeRequest.indices() == null ? 
Strings.EMPTY_ARRAY : forceMergeRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge")); @@ -738,6 +748,19 @@ static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) return request; } + static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) { + String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(verifyRepositoryRequest.name()) + .addPathPartAsIs("_verify") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(verifyRepositoryRequest.timeout()); + return request; + } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 68e32abb69dc0..fc74a43dd8038 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -192,6 +192,7 @@ public class RestHighLevelClient implements Closeable { private final IndicesClient indicesClient = new IndicesClient(this); private final ClusterClient clusterClient = new ClusterClient(this); private final SnapshotClient snapshotClient = new SnapshotClient(this); + private final TasksClient tasksClient = new TasksClient(this); /** * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the @@ -264,11 +265,31 @@ public final SnapshotClient snapshot() { return snapshotClient; } + /** + * Provides a {@link TasksClient} which can be used to access the Tasks API. + * + * See Task Management API on elastic.co + */ + public final TasksClient tasks() { + return tasksClient; + } + + /** + * Executes a bulk request using the Bulk API + * + * See Bulk API on elastic.co + */ + public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet()); + } + /** * Executes a bulk request using the Bulk API * * See Bulk API on elastic.co + * @deprecated Prefer {@link #bulk(BulkRequest, RequestOptions)} */ + @Deprecated public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException { return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, emptySet(), headers); } @@ -278,6 +299,17 @@ public final BulkResponse bulk(BulkRequest bulkRequest, Header... 
headers) throw * * See Bulk API on elastic.co */ + public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously executes a bulk request using the Bulk API + * + * See Bulk API on elastic.co + * @deprecated Prefer {@link #bulkAsync(BulkRequest, RequestOptions, ActionListener)} + */ + @Deprecated public final void bulkAsync(BulkRequest bulkRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, listener, emptySet(), headers); } @@ -574,6 +606,7 @@ public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesReque FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers); } + @Deprecated protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, @@ -581,16 +614,34 @@ protected final Resp performRequestAndParseEnt return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers); } + protected final Resp performRequestAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + Set ignores) throws IOException { + return performRequest(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), ignores); + } + + @Deprecated protected final Resp performRequest(Req request, CheckedFunction requestConverter, CheckedFunction responseConverter, Set ignores, Header... headers) throws IOException { + return performRequest(request, requestConverter, optionsForHeaders(headers), responseConverter, ignores); + } + + protected final Resp performRequest(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + Set ignores) throws IOException { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { throw validationException; } Request req = requestConverter.apply(request); - addHeaders(req, headers); + req.setOptions(options); Response response; try { response = client.performRequest(req); @@ -616,6 +667,7 @@ protected final Resp performRequest(Req reques } } + @Deprecated protected final void performRequestAsyncAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, @@ -624,10 +676,28 @@ protected final void performRequestAsyncAndPar listener, ignores, headers); } + protected final void performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { + performRequestAsync(request, requestConverter, options, + response -> parseEntity(response.getEntity(), entityParser), listener, ignores); + } + + @Deprecated protected final void performRequestAsync(Req request, CheckedFunction requestConverter, CheckedFunction responseConverter, ActionListener listener, Set ignores, Header... 
headers) { + performRequestAsync(request, requestConverter, optionsForHeaders(headers), responseConverter, listener, ignores); + } + + protected final void performRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); @@ -640,19 +710,12 @@ protected final void performRequestAsync(Req r listener.onFailure(e); return; } - addHeaders(req, headers); + req.setOptions(options); ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); client.performRequestAsync(req, responseListener); } - private static void addHeaders(Request request, Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - for (Header header : headers) { - request.addHeader(header.getName(), header.getValue()); - } - } - final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { @@ -736,6 +799,15 @@ protected final Resp parseEntity(final HttpEntity entity, } } + private static RequestOptions optionsForHeaders(Header[] headers) { + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + for (Header header : headers) { + Objects.requireNonNull(header, "header cannot be null"); + options.addHeader(header.getName(), header.getValue()); + } + return options.build(); + } + static boolean convertExistsResponse(Response response) { return response.getStatusLine().getStatusCode() == 200; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index d969232f0d70f..104bc91271148 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import java.io.IOException; @@ -116,4 +118,28 @@ public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryReques restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers); } + + /** + * Verifies a snapshot repository. + *

+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore + * API on elastic.co</a> + */ + public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, Header... headers) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, + VerifyRepositoryResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously verifies a snapshot repository. + * <p>

+ * See Snapshot and Restore + * API on elastic.co + */ + public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, + ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, + VerifyRepositoryResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java new file mode 100644 index 0000000000000..53f3f3358ba2f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java @@ -0,0 +1,344 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +import java.io.IOException; +import java.util.Map; +import java.util.HashMap; +import java.util.Collections; +import java.util.List; +import java.util.ArrayList; + +public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment { + + public static final String SHARDS_FIELD = "_shards"; + + private ShardCounts totalCounts; + private Map indexResults; + + SyncedFlushResponse(ShardCounts totalCounts, Map indexResults) { + this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed); + this.indexResults = Collections.unmodifiableMap(indexResults); + } + + /** + * @return The total number of shard copies that were processed across all indexes + */ + public int totalShards() { + return totalCounts.total; + } + + /** + * @return The number of successful shard copies that were processed across all indexes + */ + public int successfulShards() { + return totalCounts.successful; + } + + /** + * @return The number of failed shard copies that were processed across all indexes + */ + public int failedShards() { + return totalCounts.failed; + } + + /** + * @return A map of results for each index 
where the keys of the map are the index names + * and the values are the results encapsulated in {@link IndexResult}. + */ + public Map getIndexResults() { + return indexResults; + } + + ShardCounts getShardCounts() { + return totalCounts; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(SHARDS_FIELD); + totalCounts.toXContent(builder, params); + builder.endObject(); + for (Map.Entry entry: indexResults.entrySet()) { + String indexName = entry.getKey(); + IndexResult indexResult = entry.getValue(); + builder.startObject(indexName); + indexResult.toXContent(builder, params); + builder.endObject(); + } + return builder; + } + + public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + ShardCounts totalCounts = null; + Map indexResults = new HashMap<>(); + XContentLocation startLoc = parser.getTokenLocation(); + while (parser.nextToken().equals(Token.FIELD_NAME)) { + if (parser.currentName().equals(SHARDS_FIELD)) { + ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + totalCounts = ShardCounts.fromXContent(parser); + } else { + String indexName = parser.currentName(); + IndexResult indexResult = IndexResult.fromXContent(parser); + indexResults.put(indexName, indexResult); + } + } + if (totalCounts != null) { + return new SyncedFlushResponse(totalCounts, indexResults); + } else { + throw new ParsingException( + startLoc, + "Unable to reconstruct object. Total counts for shards couldn't be parsed." + ); + } + } + + /** + * Encapsulates the number of total successful and failed shard copies + */ + public static final class ShardCounts implements ToXContentFragment { + + public static final String TOTAL_FIELD = "total"; + public static final String SUCCESSFUL_FIELD = "successful"; + public static final String FAILED_FIELD = "failed"; + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "shardcounts", + a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2]) + ); + static { + PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); + PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); + PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); + } + + private int total; + private int successful; + private int failed; + + + ShardCounts(int total, int successful, int failed) { + this.total = total; + this.successful = successful; + this.failed = failed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(TOTAL_FIELD, total); + builder.field(SUCCESSFUL_FIELD, successful); + builder.field(FAILED_FIELD, failed); + return builder; + } + + public static ShardCounts fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public boolean equals(ShardCounts other) { + if (other != null) { + return + other.total == this.total && + other.successful == this.successful && + other.failed == this.failed; + } else { + return false; + } + } + + } + + /** + * Description for the flush/synced results for a particular index. + * This includes total, successful and failed copies along with failure description for each failed copy. 
+ */ + public static final class IndexResult implements ToXContentFragment { + + public static final String TOTAL_FIELD = "total"; + public static final String SUCCESSFUL_FIELD = "successful"; + public static final String FAILED_FIELD = "failed"; + public static final String FAILURES_FIELD = "failures"; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "indexresult", + a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List)a[3]) + ); + static { + PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); + PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); + PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); + PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD)); + } + + private ShardCounts counts; + private List failures; + + IndexResult(int total, int successful, int failed, List failures) { + counts = new ShardCounts(total, successful, failed); + if (failures != null) { + this.failures = Collections.unmodifiableList(failures); + } else { + this.failures = Collections.unmodifiableList(new ArrayList<>()); + } + } + + /** + * @return The total number of shard copies that were processed for this index. + */ + public int totalShards() { + return counts.total; + } + + /** + * @return The number of successful shard copies that were processed for this index. + */ + public int successfulShards() { + return counts.successful; + } + + /** + * @return The number of failed shard copies that were processed for this index. + */ + public int failedShards() { + return counts.failed; + } + + /** + * @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index. + */ + public List failures() { + return failures; + } + + ShardCounts getShardCounts() { + return counts; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + counts.toXContent(builder, params); + if (failures.size() > 0) { + builder.startArray(FAILURES_FIELD); + for (ShardFailure failure : failures) { + failure.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } + + public static IndexResult fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Description of a failed shard copy for an index. 
+ */ + public static final class ShardFailure implements ToXContentFragment { + + public static String SHARD_ID_FIELD = "shard"; + public static String FAILURE_REASON_FIELD = "reason"; + public static String ROUTING_FIELD = "routing"; + + private int shardId; + private String failureReason; + private Map routing; + + @SuppressWarnings("unchecked") + static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "shardfailure", + a -> new ShardFailure((Integer)a[0], (String)a[1], (Map)a[2]) + ); + static { + PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD)); + PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD)); + PARSER.declareObject( + optionalConstructorArg(), + (parser, c) -> parser.map(), + new ParseField(ROUTING_FIELD) + ); + } + + ShardFailure(int shardId, String failureReason, Map routing) { + this.shardId = shardId; + this.failureReason = failureReason; + if (routing != null) { + this.routing = Collections.unmodifiableMap(routing); + } else { + this.routing = Collections.unmodifiableMap(new HashMap<>()); + } + } + + /** + * @return Id of the shard whose copy failed + */ + public int getShardId() { + return shardId; + } + + /** + * @return Reason for failure of the shard copy + */ + public String getFailureReason() { + return failureReason; + } + + /** + * @return Additional information about the failure. + */ + public Map getRouting() { + return routing; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SHARD_ID_FIELD, shardId); + builder.field(FAILURE_REASON_FIELD, failureReason); + if (routing.size() > 0) { + builder.field(ROUTING_FIELD, routing); + } + builder.endObject(); + return builder; + } + + public static ShardFailure fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java new file mode 100644 index 0000000000000..214f1e7884a2a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; + +import java.io.IOException; + +import static java.util.Collections.emptySet; + +/** + * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Tasks API. + *

+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html">Task Management API on elastic.co</a> + */ +public class TasksClient { + private final RestHighLevelClient restHighLevelClient; + + TasksClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Get current tasks using the Task Management API + * <p> + * See + * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a> + */ + public ListTasksResponse list(ListTasksRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously get current tasks using the Task Management API + * <p>

+ * See + * Task Management API on elastic.co + */ + public void listAsync(ListTasksRequest request, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, + listener, emptySet(), headers); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index d41117ceb6dd6..44332b058bc15 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -20,9 +20,6 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; @@ -37,16 +34,13 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import static java.util.Collections.emptyList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -117,31 +111,6 @@ public void testClusterUpdateSettingNonExistent() { "Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]")); } - public void testListTasks() throws IOException { - ListTasksRequest request = new ListTasksRequest(); - ListTasksResponse response = execute(request, highLevelClient().cluster()::listTasks, highLevelClient().cluster()::listTasksAsync); - - assertThat(response, notNullValue()); - assertThat(response.getNodeFailures(), equalTo(emptyList())); - assertThat(response.getTaskFailures(), equalTo(emptyList())); - // It's possible that there are other tasks except 'cluster:monitor/tasks/lists[n]' and 'action":"cluster:monitor/tasks/lists' - assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); - boolean listTasksFound = false; - for (TaskGroup taskGroup : response.getTaskGroups()) { - TaskInfo parent = taskGroup.getTaskInfo(); - if ("cluster:monitor/tasks/lists".equals(parent.getAction())) { - assertThat(taskGroup.getChildTasks().size(), equalTo(1)); - TaskGroup childGroup = taskGroup.getChildTasks().iterator().next(); - assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true)); - TaskInfo child = childGroup.getTaskInfo(); - assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]")); - assertThat(child.getParentTaskId(), equalTo(parent.getTaskId())); - listTasksFound = true; - } - } - assertTrue("List tasks were not found", listTasksFound); - } - public void testPutPipeline() throws IOException { String id = "some_pipeline_id"; XContentType xContentType = randomFrom(XContentType.values()); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index 0bd6ecef8fb5c..3d1db23da16b6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -26,7 +26,6 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHeader; import org.apache.http.message.BasicRequestLine; import org.apache.http.message.BasicStatusLine; import org.apache.lucene.util.BytesRef; @@ -48,11 +47,13 @@ import java.lang.reflect.Modifier; import java.util.Arrays; import java.util.Collections; -import java.util.List; +import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; import static java.util.Collections.emptySet; -import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasSize; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -73,12 +74,12 @@ public void initClients() throws IOException { final RestClient restClient = mock(RestClient.class); restHighLevelClient = new CustomRestClient(restClient); - doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders().iterator().next())) + doAnswer(inv -> mockPerformRequest((Request) inv.getArguments()[0])) .when(restClient) .performRequest(any(Request.class)); doAnswer(inv -> mockPerformRequestAsync( - ((Request) inv.getArguments()[0]).getHeaders().iterator().next(), + ((Request) inv.getArguments()[0]), (ResponseListener) inv.getArguments()[1])) .when(restClient) .performRequestAsync(any(Request.class), any(ResponseListener.class)); @@ -87,26 +88,32 @@ public void initClients() throws IOException { public void testCustomEndpoint() throws IOException { final MainRequest request = new MainRequest(); - final Header header = new BasicHeader("node_name", randomAlphaOfLengthBetween(1, 10)); + String nodeName = randomAlphaOfLengthBetween(1, 10); - MainResponse response = restHighLevelClient.custom(request, header); - assertEquals(header.getValue(), response.getNodeName()); + MainResponse response = restHighLevelClient.custom(request, optionsForNodeName(nodeName)); + assertEquals(nodeName, response.getNodeName()); - response = restHighLevelClient.customAndParse(request, header); - assertEquals(header.getValue(), response.getNodeName()); + response = restHighLevelClient.customAndParse(request, optionsForNodeName(nodeName)); + assertEquals(nodeName, response.getNodeName()); } public void testCustomEndpointAsync() throws Exception { final MainRequest request = new MainRequest(); - final Header header = new BasicHeader("node_name", randomAlphaOfLengthBetween(1, 10)); + String nodeName = randomAlphaOfLengthBetween(1, 10); PlainActionFuture future = PlainActionFuture.newFuture(); - restHighLevelClient.customAsync(request, future, header); - assertEquals(header.getValue(), future.get().getNodeName()); + restHighLevelClient.customAsync(request, optionsForNodeName(nodeName), future); + assertEquals(nodeName, future.get().getNodeName()); future = PlainActionFuture.newFuture(); - restHighLevelClient.customAndParseAsync(request, future, header); - 
assertEquals(header.getValue(), future.get().getNodeName()); + restHighLevelClient.customAndParseAsync(request, optionsForNodeName(nodeName), future); + assertEquals(nodeName, future.get().getNodeName()); + } + + private static RequestOptions optionsForNodeName(String nodeName) { + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.addHeader("node_name", nodeName); + return options.build(); } /** @@ -115,27 +122,27 @@ public void testCustomEndpointAsync() throws Exception { */ @SuppressForbidden(reason = "We're forced to uses Class#getDeclaredMethods() here because this test checks protected methods") public void testMethodsVisibility() throws ClassNotFoundException { - final String[] methodNames = new String[]{"performRequest", - "performRequestAsync", + final String[] methodNames = new String[]{"parseEntity", + "parseResponseException", + "performRequest", "performRequestAndParseEntity", - "performRequestAsyncAndParseEntity", - "parseEntity", - "parseResponseException"}; + "performRequestAsync", + "performRequestAsyncAndParseEntity"}; - final List protectedMethods = Arrays.stream(RestHighLevelClient.class.getDeclaredMethods()) + final Set protectedMethods = Arrays.stream(RestHighLevelClient.class.getDeclaredMethods()) .filter(method -> Modifier.isProtected(method.getModifiers())) .map(Method::getName) - .collect(Collectors.toList()); + .collect(Collectors.toCollection(TreeSet::new)); - assertThat(protectedMethods, containsInAnyOrder(methodNames)); + assertThat(protectedMethods, contains(methodNames)); } /** - * Mocks the asynchronous request execution by calling the {@link #mockPerformRequest(Header)} method. + * Mocks the asynchronous request execution by calling the {@link #mockPerformRequest(Request)} method. */ - private Void mockPerformRequestAsync(Header httpHeader, ResponseListener responseListener) { + private Void mockPerformRequestAsync(Request request, ResponseListener responseListener) { try { - responseListener.onSuccess(mockPerformRequest(httpHeader)); + responseListener.onSuccess(mockPerformRequest(request)); } catch (IOException e) { responseListener.onFailure(e); } @@ -145,7 +152,9 @@ private Void mockPerformRequestAsync(Header httpHeader, ResponseListener respons /** * Mocks the synchronous request execution like if it was executed by Elasticsearch. */ - private Response mockPerformRequest(Header httpHeader) throws IOException { + private Response mockPerformRequest(Request request) throws IOException { + assertThat(request.getOptions().getHeaders(), hasSize(1)); + Header httpHeader = request.getOptions().getHeaders().get(0); final Response mockResponse = mock(Response.class); when(mockResponse.getHost()).thenReturn(new HttpHost("localhost", 9200)); @@ -171,20 +180,20 @@ private CustomRestClient(RestClient restClient) { super(restClient, RestClient::close, Collections.emptyList()); } - MainResponse custom(MainRequest mainRequest, Header... headers) throws IOException { - return performRequest(mainRequest, this::toRequest, this::toResponse, emptySet(), headers); + MainResponse custom(MainRequest mainRequest, RequestOptions options) throws IOException { + return performRequest(mainRequest, this::toRequest, options, this::toResponse, emptySet()); } - MainResponse customAndParse(MainRequest mainRequest, Header... 
headers) throws IOException { - return performRequestAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, emptySet(), headers); + MainResponse customAndParse(MainRequest mainRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(mainRequest, this::toRequest, options, MainResponse::fromXContent, emptySet()); } - void customAsync(MainRequest mainRequest, ActionListener listener, Header... headers) { - performRequestAsync(mainRequest, this::toRequest, this::toResponse, listener, emptySet(), headers); + void customAsync(MainRequest mainRequest, RequestOptions options, ActionListener listener) { + performRequestAsync(mainRequest, this::toRequest, options, this::toResponse, listener, emptySet()); } - void customAndParseAsync(MainRequest mainRequest, ActionListener listener, Header... headers) { - performRequestAsyncAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, listener, emptySet(), headers); + void customAndParseAsync(MainRequest mainRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(mainRequest, this::toRequest, options, MainResponse::fromXContent, listener, emptySet()); } Request toRequest(MainRequest mainRequest) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 88e4a2568158f..448ff0138d3ac 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; @@ -563,6 +564,39 @@ public void testFlush() throws IOException { } } + public void testSyncedFlush() throws IOException { + { + String index = "index"; + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(index, settings); + SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index); + SyncedFlushResponse flushResponse = + execute(syncedFlushRequest, highLevelClient().indices()::flushSynced, highLevelClient().indices()::flushSyncedAsync); + assertThat(flushResponse.totalShards(), equalTo(1)); + assertThat(flushResponse.successfulShards(), equalTo(1)); + assertThat(flushResponse.failedShards(), equalTo(0)); + } + { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> + execute( + syncedFlushRequest, + highLevelClient().indices()::flushSynced, + highLevelClient().indices()::flushSyncedAsync + ) + ); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + } + + public void testClearCache() throws IOException { { String index = "index"; diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 1573071da3372..5388b5ba82e6f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; @@ -43,6 +44,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -645,6 +647,29 @@ public void testFlush() { assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } + public void testSyncedFlush() { + String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); + SyncedFlushRequest syncedFlushRequest; + if (randomBoolean()) { + syncedFlushRequest = new SyncedFlushRequest(indices); + } else { + syncedFlushRequest = new SyncedFlushRequest(); + syncedFlushRequest.indices(indices); + } + Map expectedParams = new HashMap<>(); + setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions, expectedParams); + Request request = RequestConverters.flushSynced(syncedFlushRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + endpoint.add("_flush/synced"); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertThat(request.getEntity(), nullValue()); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + } + public void testForceMerge() { String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); ForceMergeRequest forceMergeRequest; @@ -1608,6 +1633,21 @@ public void testDeleteRepository() { assertNull(request.getEntity()); } + public void testVerifyRepository() { + Map expectedParams = new HashMap<>(); + String repository = randomIndicesNames(1, 1)[0]; + String endpoint = "/_snapshot/" + repository + "/_verify"; + + VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(repository); + setRandomMasterTimeout(verifyRepositoryRequest, expectedParams); + setRandomTimeout(verifyRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + + Request request = RequestConverters.verifyRepository(verifyRepositoryRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); + assertThat(expectedParams, equalTo(request.getParameters())); + } + public void testPutTemplateRequest() throws Exception { Map names = new HashMap<>(); names.put("log", "log"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 02e03bfec764e..aaba5da820613 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; @@ -86,10 +88,7 @@ public void testSnapshotGetRepositoriesNonExistent() { public void testSnapshotDeleteRepository() throws IOException { String repository = "test"; - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository, - Collections.emptyMap(), new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + assertTrue(createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); GetRepositoriesRequest request = new GetRepositoriesRequest(); GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, @@ -102,4 +101,14 @@ public void testSnapshotDeleteRepository() throws IOException { assertTrue(deleteResponse.isAcknowledged()); } + + public void testVerifyRepository() throws IOException { + PutRepositoryResponse putRepositoryResponse = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(putRepositoryResponse.isAcknowledged()); + + VerifyRepositoryRequest request = new VerifyRepositoryRequest("test"); + VerifyRepositoryResponse response = execute(request, highLevelClient().snapshot()::verifyRepository, + highLevelClient().snapshot()::verifyRepositoryAsync); + assertThat(response.getNodes().size(), equalTo(1)); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java new file mode 100644 index 0000000000000..bc8fc90dd75e6 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java @@ -0,0 +1,269 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import java.io.IOException; +import java.util.Map; +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; + +import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.ObjectIntMap; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; +import org.elasticsearch.test.ESTestCase; + +public class SyncedFlushResponseTests extends ESTestCase { + + public void testXContentSerialization() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + TestPlan plan = createTestPlan(); + + XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent()); + assertNotNull(plan.result); + serverResponsebuilder.startObject(); + plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS); + serverResponsebuilder.endObject(); + XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent()); + assertNotNull(plan.result); + clientResponsebuilder.startObject(); + plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS); + clientResponsebuilder.endObject(); + Map serverContentMap = convertFailureListToSet( + serverResponsebuilder + .generator() + .contentType() + .xContent() + .createParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + BytesReference.bytes(serverResponsebuilder).streamInput() + ).map() + ); + Map clientContentMap = convertFailureListToSet( + clientResponsebuilder + .generator() + .contentType() + .xContent() + .createParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + BytesReference.bytes(clientResponsebuilder).streamInput() + ) + .map() + ); + assertEquals(serverContentMap, clientContentMap); + } + + public void 
+        final XContentType xContentType = randomFrom(XContentType.values());
+        TestPlan plan = createTestPlan();
+        XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
+        builder.startObject();
+        plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS);
+        builder.endObject();
+        XContentParser parser = builder
+            .generator()
+            .contentType()
+            .xContent()
+            .createParser(
+                xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()
+            );
+        SyncedFlushResponse originalResponse = plan.clientResult;
+        SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser);
+        assertNotNull(parsedResponse);
+        assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts());
+        for (Map.Entry<String, SyncedFlushResponse.IndexResult> entry : originalResponse.getIndexResults().entrySet()) {
+            String index = entry.getKey();
+            SyncedFlushResponse.IndexResult responseResult = entry.getValue();
+            SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index);
+            assertNotNull(responseResult);
+            assertNotNull(parsedResult);
+            assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts());
+            assertEquals(responseResult.failures().size(), parsedResult.failures().size());
+            for (SyncedFlushResponse.ShardFailure responseShardFailure : responseResult.failures()) {
+                assertTrue(containsFailure(parsedResult.failures(), responseShardFailure));
+            }
+        }
+    }
+
+    static class TestPlan {
+        SyncedFlushResponse.ShardCounts totalCounts;
+        Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
+        ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
+        org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse result;
+        SyncedFlushResponse clientResult;
+    }
+
+    TestPlan createTestPlan() throws IOException {
+        final TestPlan testPlan = new TestPlan();
+        final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
+        Map<String, SyncedFlushResponse.IndexResult> indexResults = new HashMap<>();
+        final XContentType xContentType = randomFrom(XContentType.values());
+        final int indexCount = randomIntBetween(1, 10);
+        int totalShards = 0;
+        int totalSuccessful = 0;
+        int totalFailed = 0;
+        for (int i = 0; i < indexCount; i++) {
+            final String index = "index_" + i;
+            int shards = randomIntBetween(1, 4);
+            int replicas = randomIntBetween(0, 2);
+            int successful = 0;
+            int failed = 0;
+            int failures = 0;
+            List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
+            List<SyncedFlushResponse.ShardFailure> shardFailures = new ArrayList<>();
+            for (int shard = 0; shard < shards; shard++) {
+                final ShardId shardId = new ShardId(index, "_na_", shard);
+                if (randomInt(5) < 2) {
+                    // total shard failure
+                    failed += replicas + 1;
+                    failures++;
+                    shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
+                    shardFailures.add(
+                        new SyncedFlushResponse.ShardFailure(
+                            shardId.id(),
+                            "simulated total failure",
+                            new HashMap<>()
+                        )
+                    );
+                } else {
+                    Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
+                    for (int copy = 0; copy < replicas + 1; copy++) {
+                        final ShardRouting shardRouting =
+                            TestShardRouting.newShardRouting(
+                                index, shard, "node_" + shardId + "_" + copy, null,
+                                copy == 0, ShardRoutingState.STARTED
+                            );
+                        if (randomInt(5) < 2) {
+                            // shard copy failure
+                            failed++;
+                            failures++;
+                            shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
+                            // Building the shardRouting map here.
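+                            // The client-side ShardFailure keeps the routing of the failed copy as an
+                            // untyped map rather than as a ShardRouting object, so the expected value is
+                            // built by round-tripping the ShardRouting through the randomly chosen
+                            // XContentType (serialize it, then parse it straight back into a map),
+                            // mirroring what the client response parser produces.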
+                            XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
+                            Map<String, Object> routing =
+                                shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS)
+                                    .generator()
+                                    .contentType()
+                                    .xContent()
+                                    .createParser(
+                                        xContentRegistry(), LoggingDeprecationHandler.INSTANCE,
+                                        BytesReference.bytes(builder).streamInput()
+                                    )
+                                    .map();
+                            shardFailures.add(
+                                new SyncedFlushResponse.ShardFailure(
+                                    shardId.id(),
+                                    "copy failure " + shardId,
+                                    routing
+                                )
+                            );
+                        } else {
+                            successful++;
+                            shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse());
+                        }
+                    }
+                    shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
+                }
+            }
+            indicesResults.put(index, shardsResults);
+            indexResults.put(
+                index,
+                new SyncedFlushResponse.IndexResult(
+                    shards * (replicas + 1),
+                    successful,
+                    failed,
+                    shardFailures
+                )
+            );
+            testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
+            testPlan.expectedFailuresPerIndex.put(index, failures);
+            totalFailed += failed;
+            totalShards += shards * (replicas + 1);
+            totalSuccessful += successful;
+        }
+        testPlan.result = new org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults);
+        testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed);
+        testPlan.clientResult = new SyncedFlushResponse(
+            new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed),
+            indexResults
+        );
+        return testPlan;
+    }
+
+    public boolean containsFailure(List<SyncedFlushResponse.ShardFailure> failures, SyncedFlushResponse.ShardFailure origFailure) {
+        for (SyncedFlushResponse.ShardFailure failure : failures) {
+            if (failure.getShardId() == origFailure.getShardId() &&
+                failure.getFailureReason().equals(origFailure.getFailureReason()) &&
+                failure.getRouting().equals(origFailure.getRouting())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) {
+        if (first == null) {
+            assertNull(second);
+        } else {
+            assertEquals(first, second);
+        }
+    }
+
+    public Map<String, Object> convertFailureListToSet(Map<String, Object> input) {
+        Map<String, Object> retMap = new HashMap<>();
+        for (Map.Entry<String, Object> entry : input.entrySet()) {
+            if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) {
+                retMap.put(entry.getKey(), entry.getValue());
+            } else {
+                // This was an index entry.
+                @SuppressWarnings("unchecked")
+                Map<String, Object> indexResult = (Map<String, Object>) entry.getValue();
+                Map<String, Object> retResult = new HashMap<>();
+                for (Map.Entry<String, Object> entry2 : indexResult.entrySet()) {
+                    if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) {
+                        @SuppressWarnings("unchecked")
+                        List<Object> failures = (List<Object>) entry2.getValue();
+                        Set<Object> retSet = new HashSet<>(failures);
+                        retResult.put(entry2.getKey(), retSet);
+                    } else {
+                        retResult.put(entry2.getKey(), entry2.getValue());
+                    }
+                }
+                retMap.put(entry.getKey(), retResult);
+            }
+        }
+        return retMap;
+    }
+}
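The test above round-trips randomly generated responses; as a quick orientation, here is a minimal, self-contained sketch (not part of this change) of the parsing entry point it exercises. The hand-written JSON body and the example class are hypothetical, and it assumes `fromXContent` remains reachable from the `org.elasticsearch.client` package, as it is for the test:

```java
package org.elasticsearch.client;

import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class SyncedFlushParseExample {
    public static void main(String[] args) throws Exception {
        // Hand-written body mirroring the REST synced-flush response shape:
        // a top-level "_shards" summary plus one object per index.
        String json = "{\"_shards\":{\"total\":2,\"successful\":2,\"failed\":0},"
                + "\"index1\":{\"total\":2,\"successful\":2,\"failed\":0}}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json)) {
            SyncedFlushResponse response = SyncedFlushResponse.fromXContent(parser);
            // The three counters exposed by the new client-side response class.
            System.out.println("total=" + response.totalShards()
                    + " successful=" + response.successfulShards()
                    + " failed=" + response.failedShards());
        }
    }
}
```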
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java
new file mode 100644
index 0000000000000..fc7d70a36e10e
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
+import org.elasticsearch.tasks.TaskInfo;
+
+import java.io.IOException;
+
+import static java.util.Collections.emptyList;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class TasksIT extends ESRestHighLevelClientTestCase {
+
+    public void testListTasks() throws IOException {
+        ListTasksRequest request = new ListTasksRequest();
+        ListTasksResponse response = execute(request, highLevelClient().tasks()::list, highLevelClient().tasks()::listAsync);
+
+        assertThat(response, notNullValue());
+        assertThat(response.getNodeFailures(), equalTo(emptyList()));
+        assertThat(response.getTaskFailures(), equalTo(emptyList()));
+        // Tasks other than 'cluster:monitor/tasks/lists' and its 'cluster:monitor/tasks/lists[n]' children may be running
+        assertThat(response.getTasks().size(), greaterThanOrEqualTo(2));
+        boolean listTasksFound = false;
+        for (TaskGroup taskGroup : response.getTaskGroups()) {
+            TaskInfo parent = taskGroup.getTaskInfo();
+            if ("cluster:monitor/tasks/lists".equals(parent.getAction())) {
+                assertThat(taskGroup.getChildTasks().size(), equalTo(1));
+                TaskGroup childGroup = taskGroup.getChildTasks().iterator().next();
+                assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true));
+                TaskInfo child = childGroup.getTaskInfo();
+                assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]"));
+                assertThat(child.getParentTaskId(), equalTo(parent.getTaskId()));
+                listTasksFound = true;
+            }
+        }
+        assertTrue("List tasks were not found", listTasksFound);
+    }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
index b9329f99a3cde..29bb2d05afcc7 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
@@ -19,13 +19,8 @@
 package org.elasticsearch.client.documentation;
 
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
-import org.elasticsearch.action.TaskOperationFailure;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
 import
org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; @@ -39,21 +34,15 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; /** * This class is used to generate the Java Cluster API documentation. @@ -193,89 +182,6 @@ public void onFailure(Exception e) { } } - public void testListTasks() throws IOException { - RestHighLevelClient client = highLevelClient(); - { - // tag::list-tasks-request - ListTasksRequest request = new ListTasksRequest(); - // end::list-tasks-request - - // tag::list-tasks-request-filter - request.setActions("cluster:*"); // <1> - request.setNodes("nodeId1", "nodeId2"); // <2> - request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3> - // end::list-tasks-request-filter - - // tag::list-tasks-request-detailed - request.setDetailed(true); // <1> - // end::list-tasks-request-detailed - - // tag::list-tasks-request-wait-completion - request.setWaitForCompletion(true); // <1> - request.setTimeout(TimeValue.timeValueSeconds(50)); // <2> - request.setTimeout("50s"); // <3> - // end::list-tasks-request-wait-completion - } - - ListTasksRequest request = new ListTasksRequest(); - - // tag::list-tasks-execute - ListTasksResponse response = client.cluster().listTasks(request); - // end::list-tasks-execute - - assertThat(response, notNullValue()); - - // tag::list-tasks-response-tasks - List tasks = response.getTasks(); // <1> - // end::list-tasks-response-tasks - - // tag::list-tasks-response-calc - Map> perNodeTasks = response.getPerNodeTasks(); // <1> - List groups = response.getTaskGroups(); // <2> - // end::list-tasks-response-calc - - // tag::list-tasks-response-failures - List nodeFailures = response.getNodeFailures(); // <1> - List taskFailures = response.getTaskFailures(); // <2> - // end::list-tasks-response-failures - - assertThat(response.getNodeFailures(), equalTo(emptyList())); - assertThat(response.getTaskFailures(), equalTo(emptyList())); - assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); - } - - public void testListTasksAsync() throws Exception { - RestHighLevelClient client = highLevelClient(); - { - ListTasksRequest request = new ListTasksRequest(); - - // tag::list-tasks-execute-listener - ActionListener listener = - new ActionListener() { - @Override - public void onResponse(ListTasksResponse response) { - // <1> - } - - @Override - public void onFailure(Exception e) { - // <2> - } - }; - // end::list-tasks-execute-listener - - // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); - - // tag::list-tasks-execute-async - client.cluster().listTasksAsync(request, listener); // <1> - // end::list-tasks-execute-async - - assertTrue(latch.await(30L, 
TimeUnit.SECONDS)); - } - } - public void testPutPipeline() throws IOException { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 38a963fa33c8c..fd733b83d5ace 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; @@ -55,8 +56,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; import org.elasticsearch.action.support.ActiveShardCount; @@ -64,6 +63,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.SyncedFlushResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -81,8 +81,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.equalTo; - /** * This class is used to generate the Java Indices API documentation. 
* You need to wrap your code between two tags like: @@ -784,6 +782,89 @@ public void onFailure(Exception e) { } } + public void testSyncedFlushIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createIndex("index1", Settings.EMPTY); + } + + { + // tag::flush-synced-request + SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1> + SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2> + SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3> + // end::flush-synced-request + + // tag::flush-synced-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::flush-synced-request-indicesOptions + + // tag::flush-synced-execute + SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request); + // end::flush-synced-execute + + // tag::flush-synced-response + int totalShards = flushSyncedResponse.totalShards(); // <1> + int successfulShards = flushSyncedResponse.successfulShards(); // <2> + int failedShards = flushSyncedResponse.failedShards(); // <3> + + for (Map.Entry responsePerIndexEntry: + flushSyncedResponse.getIndexResults().entrySet()) { + String indexName = responsePerIndexEntry.getKey(); // <4> + SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue(); + int totalShardsForIndex = indexResult.totalShards(); // <5> + int successfulShardsForIndex = indexResult.successfulShards(); // <6> + int failedShardsForIndex = indexResult.failedShards(); // <7> + if (failedShardsForIndex > 0) { + for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) { + int shardId = failureEntry.getShardId(); // <8> + String failureReason = failureEntry.getFailureReason(); // <9> + Map routing = failureEntry.getRouting(); // <10> + } + } + } + // end::flush-synced-response + + // tag::flush-synced-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(SyncedFlushResponse refreshResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::flush-synced-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::flush-synced-execute-async + client.indices().flushSyncedAsync(request, listener); // <1> + // end::flush-synced-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + { + // tag::flush-synced-notfound + try { + SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist"); + client.indices().flushSynced(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.NOT_FOUND) { + // <1> + } + } + // end::flush-synced-notfound + } + } + public void testGetSettings() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 0a57fafe5be59..2890ad50c2666 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -27,6 +27,8 @@ import 
org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -45,7 +47,7 @@ import static org.hamcrest.Matchers.equalTo; /** - * This class is used to generate the Java Cluster API documentation. + * This class is used to generate the Java Snapshot API documentation. * You need to wrap your code between two tags like: * // tag::example * // end::example @@ -297,6 +299,66 @@ public void onFailure(Exception e) { } } + public void testSnapshotVerifyRepository() throws IOException { + RestHighLevelClient client = highLevelClient(); + createTestRepositories(); + + // tag::verify-repository-request + VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName); + // end::verify-repository-request + + // tag::verify-repository-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::verify-repository-request-masterTimeout + // tag::verify-repository-request-timeout + request.timeout(TimeValue.timeValueMinutes(1)); // <1> + request.timeout("1m"); // <2> + // end::verify-repository-request-timeout + + // tag::verify-repository-execute + VerifyRepositoryResponse response = client.snapshot().verifyRepository(request); + // end::verify-repository-execute + + // tag::verify-repository-response + List repositoryMetaDataResponse = response.getNodes(); + // end::verify-repository-response + assertThat(1, equalTo(repositoryMetaDataResponse.size())); + assertThat("node-0", equalTo(repositoryMetaDataResponse.get(0).getName())); + } + + public void testSnapshotVerifyRepositoryAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + { + VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName); + + // tag::verify-repository-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(VerifyRepositoryResponse verifyRepositoryRestResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::verify-repository-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::verify-repository-execute-async + client.snapshot().verifyRepositoryAsync(request, listener); // <1> + // end::verify-repository-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + private void createTestRepositories() throws IOException { PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); request.type(FsRepository.TYPE); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java new file mode 100644 index 0000000000000..faf447a4143b1 --- /dev/null +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -0,0 +1,148 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.documentation; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This class is used to generate the Java Tasks API documentation. + * You need to wrap your code between two tags like: + * // tag::example + * // end::example + * + * Where example is your tag name. + * + * Then in the documentation, you can extract what is between tag and end tags with + * ["source","java",subs="attributes,callouts,macros"] + * -------------------------------------------------- + * include-tagged::{doc-tests}/{@link TasksClientDocumentationIT}.java[example] + * -------------------------------------------------- + * + * The column width of the code block is 84. If the code contains a line longer + * than 84, the line will be cut and a horizontal scroll bar will be displayed. 
+ * (the code indentation of the tag is not included in the width)
+ */
+public class TasksClientDocumentationIT extends ESRestHighLevelClientTestCase {
+
+    public void testListTasks() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+        {
+            // tag::list-tasks-request
+            ListTasksRequest request = new ListTasksRequest();
+            // end::list-tasks-request
+
+            // tag::list-tasks-request-filter
+            request.setActions("cluster:*"); // <1>
+            request.setNodes("nodeId1", "nodeId2"); // <2>
+            request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3>
+            // end::list-tasks-request-filter
+
+            // tag::list-tasks-request-detailed
+            request.setDetailed(true); // <1>
+            // end::list-tasks-request-detailed
+
+            // tag::list-tasks-request-wait-completion
+            request.setWaitForCompletion(true); // <1>
+            request.setTimeout(TimeValue.timeValueSeconds(50)); // <2>
+            request.setTimeout("50s"); // <3>
+            // end::list-tasks-request-wait-completion
+        }
+
+        ListTasksRequest request = new ListTasksRequest();
+
+        // tag::list-tasks-execute
+        ListTasksResponse response = client.tasks().list(request);
+        // end::list-tasks-execute
+
+        assertThat(response, notNullValue());
+
+        // tag::list-tasks-response-tasks
+        List<TaskInfo> tasks = response.getTasks(); // <1>
+        // end::list-tasks-response-tasks
+
+        // tag::list-tasks-response-calc
+        Map<String, List<TaskInfo>> perNodeTasks = response.getPerNodeTasks(); // <1>
+        List<TaskGroup> groups = response.getTaskGroups(); // <2>
+        // end::list-tasks-response-calc
+
+        // tag::list-tasks-response-failures
+        List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1>
+        List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2>
+        // end::list-tasks-response-failures
+
+        assertThat(response.getNodeFailures(), equalTo(emptyList()));
+        assertThat(response.getTaskFailures(), equalTo(emptyList()));
+        assertThat(response.getTasks().size(), greaterThanOrEqualTo(2));
+    }
+
+    public void testListTasksAsync() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+        {
+            ListTasksRequest request = new ListTasksRequest();
+
+            // tag::list-tasks-execute-listener
+            ActionListener<ListTasksResponse> listener =
+                new ActionListener<ListTasksResponse>() {
+                    @Override
+                    public void onResponse(ListTasksResponse response) {
+                        // <1>
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // <2>
+                    }
+                };
+            // end::list-tasks-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::list-tasks-execute-async
+            client.tasks().listAsync(request, listener); // <1>
+            // end::list-tasks-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java
index 59b82e5bf9649..a6febe91ae8d0
--- a/client/rest/src/main/java/org/elasticsearch/client/Request.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java
@@ -19,17 +19,11 @@
 package org.elasticsearch.client;
 
-import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ContentType;
-import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.entity.NStringEntity;
-import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
 
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
@@ -42,11 +36,9 @@ public final class Request {
     private final String
method; private final String endpoint; private final Map parameters = new HashMap<>(); - private final List
headers = new ArrayList<>(); private HttpEntity entity; - private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory = - HttpAsyncResponseConsumerFactory.DEFAULT; + private RequestOptions options = RequestOptions.DEFAULT; /** * Create the {@linkplain Request}. @@ -127,40 +119,29 @@ public HttpEntity getEntity() { } /** - * Add the provided header to the request. + * Set the portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. */ - public void addHeader(String name, String value) { - Objects.requireNonNull(name, "header name cannot be null"); - Objects.requireNonNull(value, "header value cannot be null"); - this.headers.add(new ReqHeader(name, value)); + public void setOptions(RequestOptions options) { + Objects.requireNonNull(options, "options cannot be null"); + this.options = options; } /** - * Headers to attach to the request. + * Set the portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. */ - List
getHeaders() { - return Collections.unmodifiableList(headers); + public void setOptions(RequestOptions.Builder options) { + Objects.requireNonNull(options, "options cannot be null"); + this.options = options.build(); } /** - * set the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the - * response body gets streamed from a non-blocking HTTP connection on the - * client side. + * Get the portion of an HTTP request to Elasticsearch that can be + * manipulated without changing Elasticsearch's behavior. */ - public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { - this.httpAsyncResponseConsumerFactory = - Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null"); - } - - /** - * The {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the - * response body gets streamed from a non-blocking HTTP connection on the - * client side. - */ - public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() { - return httpAsyncResponseConsumerFactory; + public RequestOptions getOptions() { + return options; } @Override @@ -175,18 +156,7 @@ public String toString() { if (entity != null) { b.append(", entity=").append(entity); } - if (headers.size() > 0) { - b.append(", headers="); - for (int h = 0; h < headers.size(); h++) { - if (h != 0) { - b.append(','); - } - b.append(headers.get(h).toString()); - } - } - if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { - b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory); - } + b.append(", options=").append(options); return b.append('}').toString(); } @@ -204,40 +174,11 @@ public boolean equals(Object obj) { && endpoint.equals(other.endpoint) && parameters.equals(other.parameters) && Objects.equals(entity, other.entity) - && headers.equals(other.headers) - && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); + && options.equals(other.options); } @Override public int hashCode() { - return Objects.hash(method, endpoint, parameters, entity, headers.hashCode(), httpAsyncResponseConsumerFactory); - } - - /** - * Custom implementation of {@link BasicHeader} that overrides equals and hashCode. - */ - static final class ReqHeader extends BasicHeader { - - ReqHeader(String name, String value) { - super(name, value); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other instanceof ReqHeader) { - Header otherHeader = (Header) other; - return Objects.equals(getName(), otherHeader.getName()) && - Objects.equals(getValue(), otherHeader.getValue()); - } - return false; - } - - @Override - public int hashCode() { - return Objects.hash(getName(), getValue()); - } + return Objects.hash(method, endpoint, parameters, entity, options); } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java new file mode 100644 index 0000000000000..e31db17a336b0 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.Header;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
+import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * The portion of an HTTP request to Elasticsearch that can be
+ * manipulated without changing Elasticsearch's behavior.
+ */
+public final class RequestOptions {
+    public static final RequestOptions DEFAULT = new Builder(
+            Collections.<Header>
emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build(); + + private final List
headers; + private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; + + private RequestOptions(Builder builder) { + this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers)); + this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory; + } + + public Builder toBuilder() { + Builder builder = new Builder(headers, httpAsyncResponseConsumerFactory); + return builder; + } + + /** + * Headers to attach to the request. + */ + public List
getHeaders() {
+        return headers;
+    }
+
+    /**
+     * The {@link HttpAsyncResponseConsumerFactory} used to create one
+     * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
+     * response body gets streamed from a non-blocking HTTP connection on the
+     * client side.
+     */
+    public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() {
+        return httpAsyncResponseConsumerFactory;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder b = new StringBuilder();
+        b.append("RequestOptions{");
+        if (headers.size() > 0) {
+            b.append("headers=");
+            for (int h = 0; h < headers.size(); h++) {
+                if (h != 0) {
+                    b.append(',');
+                }
+                b.append(headers.get(h).toString());
+            }
+        }
+        if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
+            if (headers.size() > 0) {
+                b.append(", ");
+            }
+            b.append("consumerFactory=").append(httpAsyncResponseConsumerFactory);
+        }
+        return b.append('}').toString();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || (obj.getClass() != getClass())) {
+            return false;
+        }
+        if (obj == this) {
+            return true;
+        }
+
+        RequestOptions other = (RequestOptions) obj;
+        return headers.equals(other.headers)
+                && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(headers, httpAsyncResponseConsumerFactory);
+    }
+
+    public static class Builder {
+        private final List
headers; + private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory; + + private Builder(List
headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { + this.headers = new ArrayList<>(headers); + this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory; + } + + /** + * Build the {@linkplain RequestOptions}. + */ + public RequestOptions build() { + return new RequestOptions(this); + } + + /** + * Add the provided header to the request. + */ + public void addHeader(String name, String value) { + Objects.requireNonNull(name, "header name cannot be null"); + Objects.requireNonNull(value, "header value cannot be null"); + this.headers.add(new ReqHeader(name, value)); + } + + /** + * set the {@link HttpAsyncResponseConsumerFactory} used to create one + * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * response body gets streamed from a non-blocking HTTP connection on the + * client side. + */ + public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { + this.httpAsyncResponseConsumerFactory = + Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null"); + } + } + + /** + * Custom implementation of {@link BasicHeader} that overrides equals and + * hashCode so it is easier to test equality of {@link RequestOptions}. + */ + static final class ReqHeader extends BasicHeader { + + ReqHeader(String name, String value) { + super(name, value); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof ReqHeader) { + Header otherHeader = (Header) other; + return Objects.equals(getName(), otherHeader.getName()) && + Objects.equals(getValue(), otherHeader.getValue()); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getValue()); + } + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 33171e18e743d..0e603c4069ae4 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -61,6 +61,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -132,7 +133,7 @@ public synchronized void setHosts(HttpHost... hosts) { if (hosts == null || hosts.length == 0) { throw new IllegalArgumentException("hosts must not be null nor empty"); } - Set httpHosts = new HashSet<>(); + Set httpHosts = new LinkedHashSet<>(); AuthCache authCache = new BasicAuthCache(); for (HttpHost host : hosts) { Objects.requireNonNull(host, "host cannot be null"); @@ -143,6 +144,13 @@ public synchronized void setHosts(HttpHost... hosts) { this.blacklist.clear(); } + /** + * Returns the configured hosts + */ + public List getHosts() { + return new ArrayList<>(hostTuple.hosts); + } + /** * Sends a request to the Elasticsearch cluster that the client points to. * Blocks until the request is completed and returns its response or fails @@ -304,8 +312,7 @@ public Response performRequest(String method, String endpoint, Map> hostTuple, final HttpRequestBase request, @@ -883,11 +889,24 @@ private static class HostTuple { */ @Deprecated private static void addHeaders(Request request, Header... 
headers) { + setOptions(request, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory(), headers); + } + + /** + * Add all headers from the provided varargs argument to a {@link Request}. This only exists + * to support methods that exist for backwards compatibility. + */ + @Deprecated + private static void setOptions(Request request, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, + Header... headers) { Objects.requireNonNull(headers, "headers cannot be null"); + RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { Objects.requireNonNull(header, "header cannot be null"); - request.addHeader(header.getName(), header.getValue()); + options.addHeader(header.getName(), header.getValue()); } + options.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); + request.setOptions(options); } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java new file mode 100644 index 0000000000000..19106792228d9 --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestOptionsTests.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; + +public class RequestOptionsTests extends RestClientTestCase { + public void testDefault() { + assertEquals(Collections.
emptyList(), RequestOptions.DEFAULT.getHeaders()); + assertEquals(HttpAsyncResponseConsumerFactory.DEFAULT, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory()); + assertEquals(RequestOptions.DEFAULT, RequestOptions.DEFAULT.toBuilder().build()); + } + + public void testAddHeader() { + try { + randomBuilder().addHeader(null, randomAsciiLettersOfLengthBetween(3, 10)); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("header name cannot be null", e.getMessage()); + } + + try { + randomBuilder().addHeader(randomAsciiLettersOfLengthBetween(3, 10), null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("header value cannot be null", e.getMessage()); + } + + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + int numHeaders = between(0, 5); + List
headers = new ArrayList<>(); + for (int i = 0; i < numHeaders; i++) { + Header header = new RequestOptions.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)); + headers.add(header); + builder.addHeader(header.getName(), header.getValue()); + } + RequestOptions options = builder.build(); + assertEquals(headers, options.getHeaders()); + + try { + options.getHeaders().add( + new RequestOptions.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3))); + fail("expected failure"); + } catch (UnsupportedOperationException e) { + assertNull(e.getMessage()); + } + } + + public void testSetHttpAsyncResponseConsumerFactory() { + try { + RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(null); + fail("expected failure"); + } catch (NullPointerException e) { + assertEquals("httpAsyncResponseConsumerFactory cannot be null", e.getMessage()); + } + + HttpAsyncResponseConsumerFactory factory = mock(HttpAsyncResponseConsumerFactory.class); + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.setHttpAsyncResponseConsumerFactory(factory); + RequestOptions options = builder.build(); + assertSame(factory, options.getHttpAsyncResponseConsumerFactory()); + } + + public void testEqualsAndHashCode() { + RequestOptions request = randomBuilder().build(); + assertEquals(request, request); + + RequestOptions copy = copy(request); + assertEquals(request, copy); + assertEquals(copy, request); + assertEquals(request.hashCode(), copy.hashCode()); + + RequestOptions mutant = mutate(request); + assertNotEquals(request, mutant); + assertNotEquals(mutant, request); + } + + static RequestOptions.Builder randomBuilder() { + RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + + if (randomBoolean()) { + int headerCount = between(1, 5); + for (int i = 0; i < headerCount; i++) { + builder.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + } + } + + if (randomBoolean()) { + builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); + } + + return builder; + } + + private static RequestOptions copy(RequestOptions options) { + return options.toBuilder().build(); + } + + private static RequestOptions mutate(RequestOptions options) { + RequestOptions.Builder mutant = options.toBuilder(); + int mutationType = between(0, 1); + switch (mutationType) { + case 0: + mutant.addHeader("extra", "m"); + return mutant.build(); + case 1: + mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); + return mutant.build(); + default: + throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index 29bbf23a1f20e..a7cf625b61d68 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -37,6 +37,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; import static org.junit.Assert.fail; public class RequestTests extends RestClientTestCase { @@ -127,33 +128,33 @@ public void testSetJsonEntity() throws IOException { assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); } - public void 
testAddHeader() { + public void testSetOptions() { final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); Request request = new Request(method, endpoint); try { - request.addHeader(null, randomAsciiLettersOfLengthBetween(3, 10)); + request.setOptions((RequestOptions) null); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("header name cannot be null", e.getMessage()); + assertEquals("options cannot be null", e.getMessage()); } try { - request.addHeader(randomAsciiLettersOfLengthBetween(3, 10), null); + request.setOptions((RequestOptions.Builder) null); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("header value cannot be null", e.getMessage()); + assertEquals("options cannot be null", e.getMessage()); } - int numHeaders = between(0, 5); - List
headers = new ArrayList<>(); - for (int i = 0; i < numHeaders; i++) { - Header header = new Request.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)); - headers.add(header); - request.addHeader(header.getName(), header.getValue()); - } - assertEquals(headers, new ArrayList<>(request.getHeaders())); + RequestOptions.Builder builder = RequestOptionsTests.randomBuilder(); + request.setOptions(builder); + assertEquals(builder.build(), request.getOptions()); + + builder = RequestOptionsTests.randomBuilder(); + RequestOptions options = builder.build(); + request.setOptions(options); + assertSame(options, request.getOptions()); } public void testEqualsAndHashCode() { @@ -193,14 +194,9 @@ private static Request randomRequest() { } if (randomBoolean()) { - int headerCount = between(1, 5); - for (int i = 0; i < headerCount; i++) { - request.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); - } - } - - if (randomBoolean()) { - request.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); + request.setOptions(options); } return request; @@ -222,7 +218,7 @@ private static Request mutate(Request request) { return mutant; } Request mutant = copy(request); - int mutationType = between(0, 3); + int mutationType = between(0, 2); switch (mutationType) { case 0: mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra"); @@ -231,10 +227,9 @@ private static Request mutate(Request request) { mutant.setJsonEntity("mutant"); // randomRequest can't produce this value return mutant; case 2: - mutant.addHeader("extra", "m"); - return mutant; - case 3: - mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); + RequestOptions.Builder options = mutant.getOptions().toBuilder(); + options.addHeader("extra", "m"); + mutant.setOptions(options); return mutant; default: throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); @@ -246,9 +241,6 @@ private static void copyMutables(Request from, Request to) { to.addParameter(param.getKey(), param.getValue()); } to.setEntity(from.getEntity()); - for (Header header : from.getHeaders()) { - to.addHeader(header.getName(), header.getValue()); - } - to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); + to.setOptions(from.getOptions()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index a3d0196dab9a8..114d34c73da89 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -378,9 +378,11 @@ private Response bodyTest(RestClient restClient, String method, int statusCode, String requestBody = "{ \"field\": \"value\" }"; Request request = new Request(method, "/" + statusCode); request.setJsonEntity(requestBody); + RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { - request.addHeader(header.getName(), header.getValue()); + options.addHeader(header.getName(), header.getValue()); } + request.setOptions(options); Response esResponse; try { esResponse = restClient.performRequest(request); diff 
--git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 3811b60023b43..634929c5de156 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -362,9 +362,11 @@ public void testHeaders() throws IOException { final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Request request = new Request(method, "/" + statusCode); + RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header requestHeader : requestHeaders) { - request.addHeader(requestHeader.getName(), requestHeader.getValue()); + options.addHeader(requestHeader.getName(), requestHeader.getValue()); } + request.setOptions(options); Response esResponse; try { esResponse = restClient.performRequest(request); @@ -438,11 +440,13 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { final Set uniqueNames = new HashSet<>(); if (randomBoolean()) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); + RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { - request.addHeader(header.getName(), header.getValue()); - expectedRequest.addHeader(new Request.ReqHeader(header.getName(), header.getValue())); + options.addHeader(header.getName(), header.getValue()); + expectedRequest.addHeader(new RequestOptions.ReqHeader(header.getName(), header.getValue())); uniqueNames.add(header.getName()); } + request.setOptions(options); } for (Header defaultHeader : defaultHeaders) { // request level headers override default headers diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 15fa5c0f99596..5fe5fcae78fee 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.net.URI; +import java.util.Arrays; import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -251,6 +252,37 @@ public void testSetHostsWrongArguments() throws IOException { } } + public void testSetHostsPreservesOrdering() throws Exception { + try (RestClient restClient = createRestClient()) { + HttpHost[] hosts = randomHosts(); + restClient.setHosts(hosts); + assertEquals(Arrays.asList(hosts), restClient.getHosts()); + } + } + + private static HttpHost[] randomHosts() { + int numHosts = randomIntBetween(1, 10); + HttpHost[] hosts = new HttpHost[numHosts]; + for (int i = 0; i < hosts.length; i++) { + hosts[i] = new HttpHost("host-" + i, 9200); + } + return hosts; + } + + public void testSetHostsDuplicatedHosts() throws Exception { + try (RestClient restClient = createRestClient()) { + int numHosts = randomIntBetween(1, 10); + HttpHost[] hosts = new HttpHost[numHosts]; + HttpHost host = new HttpHost("host", 9200); + for (int i = 0; i < hosts.length; i++) { + hosts[i] = host; + } + restClient.setHosts(hosts); + assertEquals(1, restClient.getHosts().size()); + assertEquals(host, restClient.getHosts().get(0)); + } + } + /** * @deprecated will remove method in 7.0 but needs tests until then. 
Replaced by {@link RequestTests#testConstructor()}. */ diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index f3ce112fea1a1..d73c29bd91bc4 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -38,6 +38,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseListener; import org.elasticsearch.client.RestClient; @@ -171,14 +172,22 @@ public void onFailure(Exception exception) { //tag::rest-client-body-shorter request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter - //tag::rest-client-headers - request.addHeader("Accept", "text/plain"); - request.addHeader("Cache-Control", "no-cache"); - //end::rest-client-headers - //tag::rest-client-response-consumer - request.setHttpAsyncResponseConsumerFactory( - new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); - //end::rest-client-response-consumer + { + //tag::rest-client-headers + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Accept", "text/plain"); + options.addHeader("Cache-Control", "no-cache"); + request.setOptions(options); + //end::rest-client-headers + } + { + //tag::rest-client-response-consumer + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setHttpAsyncResponseConsumerFactory( + new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024)); + request.setOptions(options); + //end::rest-client-response-consumer + } } { HttpEntity[] documents = new HttpEntity[10]; diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java index cbc77351de98b..41051555bae2c 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java @@ -58,7 +58,6 @@ public void onFailure(HttpHost host) { if (sniffer == null) { throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); } - //re-sniff immediately but take out the node that failed - sniffer.sniffOnFailure(host); + sniffer.sniffOnFailure(); } } diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index c655babd9ed3d..dc873ccd44e10 100644 --- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -31,12 +31,14 @@ import java.security.PrivilegedAction; import java.util.List; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; /** * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of @@ -51,101 +53,175 @@ public class Sniffer implements Closeable { private static final Log logger = LogFactory.getLog(Sniffer.class); private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer"; - private final Task task; + private final HostsSniffer hostsSniffer; + private final RestClient restClient; + private final long sniffIntervalMillis; + private final long sniffAfterFailureDelayMillis; + private final Scheduler scheduler; + private final AtomicBoolean initialized = new AtomicBoolean(false); + private volatile ScheduledTask nextScheduledTask; Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) { - this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); + this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay); + } + + Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) { + this.hostsSniffer = hostsSniffer; + this.restClient = restClient; + this.sniffIntervalMillis = sniffInterval; + this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay; + this.scheduler = scheduler; + /* + * The first sniffing round is async, so this constructor returns before nextScheduledTask is assigned to a task. + * The initialized flag guards against an NPE caused by that. + */ + Task task = new Task(sniffIntervalMillis) { + @Override + public void run() { + super.run(); + initialized.compareAndSet(false, true); + } + }; + /* + * We do not keep track of the returned future as we never intend to cancel the initial sniffing round; instead we + * prevent any other operation from being executed till the sniffer is properly initialized + */ + scheduler.schedule(task, 0L); } /** - * Triggers a new sniffing round and explicitly takes out the failed host provided as argument + * Schedule sniffing to run as soon as possible if it isn't already running. Once such a sniffing round runs + * it will also schedule a new round after sniffAfterFailureDelay ms. */ - public void sniffOnFailure(HttpHost failedHost) { - this.task.sniffOnFailure(failedHost); + public void sniffOnFailure() { + //sniffOnFailure does nothing until the initial sniffing round has been completed + if (initialized.get()) { + /* + * If sniffing is already running, there is no point in scheduling another round right after the current one. + * Concurrent calls may be checking the same task state, but only the first skip call on the same task returns true. + * The task may also get replaced while we check its state, in which case calling skip on it returns false. + */ + if (this.nextScheduledTask.skip()) { + /* + * We do not keep track of this future as the task will immediately run and we don't intend to cancel it + * due to concurrent sniffOnFailure runs. Effectively the previous (now cancelled or skipped) task will stay + * assigned to nextScheduledTask till this onFailure round gets run and schedules its corresponding afterFailure round.
+ */ + scheduler.schedule(new Task(sniffAfterFailureDelayMillis), 0L); + } + } } - @Override - public void close() throws IOException { - task.shutdown(); + enum TaskState { + WAITING, SKIPPED, STARTED } - private static class Task implements Runnable { - private final HostsSniffer hostsSniffer; - private final RestClient restClient; - - private final long sniffIntervalMillis; - private final long sniffAfterFailureDelayMillis; - private final ScheduledExecutorService scheduledExecutorService; - private final AtomicBoolean running = new AtomicBoolean(false); - private ScheduledFuture scheduledFuture; - - private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) { - this.hostsSniffer = hostsSniffer; - this.restClient = restClient; - this.sniffIntervalMillis = sniffIntervalMillis; - this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis; - SnifferThreadFactory threadFactory = new SnifferThreadFactory(SNIFFER_THREAD_NAME); - this.scheduledExecutorService = Executors.newScheduledThreadPool(1, threadFactory); - scheduleNextRun(0); - } - - synchronized void scheduleNextRun(long delayMillis) { - if (scheduledExecutorService.isShutdown() == false) { - try { - if (scheduledFuture != null) { - //regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay - this.scheduledFuture.cancel(false); - } - logger.debug("scheduling next sniff in " + delayMillis + " ms"); - this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS); - } catch(Exception e) { - logger.error("error while scheduling next sniffer task", e); - } - } + class Task implements Runnable { + final long nextTaskDelay; + final AtomicReference taskState = new AtomicReference<>(TaskState.WAITING); + + Task(long nextTaskDelay) { + this.nextTaskDelay = nextTaskDelay; } @Override public void run() { - sniff(null, sniffIntervalMillis); - } - - void sniffOnFailure(HttpHost failedHost) { - sniff(failedHost, sniffAfterFailureDelayMillis); - } - - void sniff(HttpHost excludeHost, long nextSniffDelayMillis) { - if (running.compareAndSet(false, true)) { - try { - List sniffedHosts = hostsSniffer.sniffHosts(); - logger.debug("sniffed hosts: " + sniffedHosts); - if (excludeHost != null) { - sniffedHosts.remove(excludeHost); - } - if (sniffedHosts.isEmpty()) { - logger.warn("no hosts to set, hosts will be updated at the next sniffing round"); - } else { - this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()])); - } - } catch (Exception e) { - logger.error("error while sniffing nodes", e); - } finally { - scheduleNextRun(nextSniffDelayMillis); - running.set(false); - } + /* + * Skipped or already started tasks do nothing. In most cases tasks will be cancelled and not run, but we want to protect against + * cases where future#cancel returns true yet the task runs. We want to make sure that such tasks do nothing, otherwise they will + * schedule another round at the end and so on, leaving us with multiple parallel sniffing "tracks" which is undesirable.
+ */ + if (taskState.compareAndSet(TaskState.WAITING, TaskState.STARTED) == false) { + return; } - } - - synchronized void shutdown() { - scheduledExecutorService.shutdown(); try { - if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) { - return; - } - scheduledExecutorService.shutdownNow(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); + sniff(); + } catch (Exception e) { + logger.error("error while sniffing nodes", e); + } finally { + Task task = new Task(sniffIntervalMillis); + Future future = scheduler.schedule(task, nextTaskDelay); + //tasks are run by a single threaded executor, so swapping is safe with a simple volatile variable + ScheduledTask previousTask = nextScheduledTask; + nextScheduledTask = new ScheduledTask(task, future); + assert initialized.get() == false || + previousTask.task.isSkipped() || previousTask.task.hasStarted() : "task that we are replacing is neither " + + "skipped nor has it ever started"; } } + + /** + * Returns true if the task has started, false if it has not started yet or was skipped + */ + boolean hasStarted() { + return taskState.get() == TaskState.STARTED; + } + + /** + * Sets this task to be skipped. Returns true if the task will be skipped, false if the task has already started. + */ + boolean skip() { + /* + * Threads may still get run although future#cancel returns true. We make sure that a task is either cancelled (or skipped), + * or entirely run. In the odd case that future#cancel returns true and the thread still runs, the task won't do anything. + * In case future#cancel returns true but the task has already started, this state change will not succeed, hence this method + * returns false and the task will normally run. + */ + return taskState.compareAndSet(TaskState.WAITING, TaskState.SKIPPED); + } + + /** + * Returns true if the task was set to be skipped before it was started + */ + boolean isSkipped() { + return taskState.get() == TaskState.SKIPPED; + } + } + + static final class ScheduledTask { + final Task task; + final Future future; + + ScheduledTask(Task task, Future future) { + this.task = task; + this.future = future; + } + + /** + * Cancels this task. Returns true if the task has been successfully cancelled, meaning it won't be executed + * or, if it does get executed, its execution won't have any effect. Returns false if the task cannot be cancelled (possibly it was + * already cancelled or already completed). + */ + boolean skip() { + /* + * Future#cancel should return false whenever a task cannot be cancelled, most likely as it has already started. We don't + * trust it much though so we try to cancel hoping that it will work. At the same time we always call skip too, which means + * that if the task has already started the state change will fail. We could potentially not call skip when cancel returns + * false but we prefer to stay on the safe side.
+ */ + future.cancel(false); + return task.skip(); + } + } + + final void sniff() throws IOException { + List sniffedHosts = hostsSniffer.sniffHosts(); + if (logger.isDebugEnabled()) { + logger.debug("sniffed hosts: " + sniffedHosts); + } + if (sniffedHosts.isEmpty()) { + logger.warn("no hosts to set, hosts will be updated at the next sniffing round"); + } else { + restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()])); + } + } + + @Override + public void close() { + if (initialized.get()) { + nextScheduledTask.skip(); + } + this.scheduler.shutdown(); } /** @@ -158,8 +234,62 @@ public static SnifferBuilder builder(RestClient restClient) { return new SnifferBuilder(restClient); } - private static class SnifferThreadFactory implements ThreadFactory { + /** + * The Scheduler interface isolates the scheduling aspects of sniffing so that we can test + * the sniffer by injecting, when needed, a custom scheduler that is better suited for testing. + */ + interface Scheduler { + /** + * Schedules the provided {@link Runnable} to be executed in delayMillis milliseconds + */ + Future schedule(Task task, long delayMillis); + + /** + * Shuts this scheduler down + */ + void shutdown(); + } + + /** + * Default implementation of {@link Scheduler}, based on {@link ScheduledExecutorService} + */ + static final class DefaultScheduler implements Scheduler { + final ScheduledExecutorService executor; + + DefaultScheduler() { + this(initScheduledExecutorService()); + } + + DefaultScheduler(ScheduledExecutorService executor) { + this.executor = executor; + } + + private static ScheduledExecutorService initScheduledExecutorService() { + ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new SnifferThreadFactory(SNIFFER_THREAD_NAME)); + executor.setRemoveOnCancelPolicy(true); + return executor; + } + + @Override + public Future schedule(Task task, long delayMillis) { + return executor.schedule(task, delayMillis, TimeUnit.MILLISECONDS); + } + + @Override + public void shutdown() { + executor.shutdown(); + try { + if (executor.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + return; + } + executor.shutdownNow(); + } catch (InterruptedException ignore) { + Thread.currentThread().interrupt(); + } + } + } + static class SnifferThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; private final ThreadFactory originalThreadFactory; diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java index 5a52151d76e01..7550459e9ea50 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java @@ -21,7 +21,6 @@ import org.apache.http.HttpHost; -import java.io.IOException; import java.util.Collections; import java.util.List; @@ -30,7 +29,7 @@ */ class MockHostsSniffer implements HostsSniffer { @Override - public List sniffHosts() throws IOException { + public List sniffHosts() { return Collections.singletonList(new HttpHost("localhost", 9200)); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java new file mode 100644 index 0000000000000..8172774a77d80 --- /dev/null +++
b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -0,0 +1,656 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler; +import org.elasticsearch.client.sniff.Sniffer.Scheduler; +import org.mockito.Matchers; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class SnifferTests extends RestClientTestCase { + + /** + * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation + * to retrieve nodes and set them (when not empty) to the provided {@link RestClient} instance. 
+ */ + public void testSniff() throws IOException { + HttpHost initialHost = new HttpHost("localhost", 9200); + try (RestClient restClient = RestClient.builder(initialHost).build()) { + Scheduler noOpScheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + return mock(Future.class); + } + + @Override + public void shutdown() { + + } + }; + CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + int iters = randomIntBetween(5, 30); + try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)){ + { + assertEquals(1, restClient.getHosts().size()); + HttpHost httpHost = restClient.getHosts().get(0); + assertEquals("localhost", httpHost.getHostName()); + assertEquals(9200, httpHost.getPort()); + } + int emptyList = 0; + int failures = 0; + int runs = 0; + List lastHosts = Collections.singletonList(initialHost); + for (int i = 0; i < iters; i++) { + try { + runs++; + sniffer.sniff(); + if (hostsSniffer.failures.get() > failures) { + failures++; + fail("should have failed given that hostsSniffer says it threw an exception"); + } else if (hostsSniffer.emptyList.get() > emptyList) { + emptyList++; + assertEquals(lastHosts, restClient.getHosts()); + } else { + assertNotEquals(lastHosts, restClient.getHosts()); + List expectedHosts = CountingHostsSniffer.buildHosts(runs); + assertEquals(expectedHosts, restClient.getHosts()); + lastHosts = restClient.getHosts(); + } + } catch(IOException e) { + if (hostsSniffer.failures.get() > failures) { + failures++; + assertEquals("communication breakdown", e.getMessage()); + } + } + } + assertEquals(hostsSniffer.emptyList.get(), emptyList); + assertEquals(hostsSniffer.failures.get(), failures); + assertEquals(hostsSniffer.runs.get(), runs); + } + } + } + + /** + * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}. + * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled. + * The {@link CountingHostsSniffer} doesn't make any network connection but may throw an exception or return no hosts, which makes + * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling. + * The {@link Scheduler} implementation submits tasks rather than scheduling them, meaning that it doesn't respect the requested sniff + * delays, while still making it possible to assert that the delays requested for each run and the following one have the expected values.
+ */ + public void testOrdinarySniffRounds() throws Exception { + final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + RestClient restClient = mock(RestClient.class); + CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + final int iters = randomIntBetween(30, 100); + final Set> futures = new CopyOnWriteArraySet<>(); + final CountDownLatch completionLatch = new CountDownLatch(1); + final AtomicInteger runs = new AtomicInteger(iters); + final ExecutorService executor = Executors.newSingleThreadExecutor(); + final AtomicReference> lastFuture = new AtomicReference<>(); + final AtomicReference lastTask = new AtomicReference<>(); + Scheduler scheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + assertEquals(sniffInterval, task.nextTaskDelay); + int numberOfRuns = runs.getAndDecrement(); + if (numberOfRuns == iters) { + //the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0 + assertEquals(0L, delayMillis); + assertEquals(sniffInterval, task.nextTaskDelay); + } else { + //all of the subsequent times "schedule" is called with delay set to the configured sniff interval + assertEquals(sniffInterval, delayMillis); + assertEquals(sniffInterval, task.nextTaskDelay); + if (numberOfRuns == 0) { + completionLatch.countDown(); + return null; + } + } + //we submit rather than scheduling to make the test quick and not depend on time + Future future = executor.submit(task); + futures.add(future); + if (numberOfRuns == 1) { + lastFuture.set(future); + lastTask.set(task); + } + return future; + } + + @Override + public void shutdown() { + //the executor is closed externally, shutdown is tested separately + } + }; + try { + new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS)); + assertEquals(iters, futures.size()); + //the last future is the only one that may not be completed yet, as the count down happens + //while scheduling the next round which is still part of the execution of the runnable itself. + assertTrue(lastTask.get().hasStarted()); + lastFuture.get().get(); + for (Future future : futures) { + assertTrue(future.isDone()); + future.get(); + } + } finally { + executor.shutdown(); + assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)); + } + int totalRuns = hostsSniffer.runs.get(); + assertEquals(iters, totalRuns); + int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); + verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + verifyNoMoreInteractions(restClient); + } + + /** + * Test that {@link Sniffer#close()} shuts down the underlying {@link Scheduler}, and that such calls are idempotent. + * Also verifies that the next scheduled round gets cancelled.
+ */ + public void testClose() { + final Future future = mock(Future.class); + long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + RestClient restClient = mock(RestClient.class); + final AtomicInteger shutdown = new AtomicInteger(0); + final AtomicBoolean initialized = new AtomicBoolean(false); + Scheduler scheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + if (initialized.compareAndSet(false, true)) { + //run from the same thread so the sniffer is guaranteed to be initialized and the scheduled task gets cancelled on close + task.run(); + } + return future; + } + + @Override + public void shutdown() { + shutdown.incrementAndGet(); + } + }; + + Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay); + assertEquals(0, shutdown.get()); + int iters = randomIntBetween(3, 10); + for (int i = 1; i <= iters; i++) { + sniffer.close(); + verify(future, times(i)).cancel(false); + assertEquals(i, shutdown.get()); + } + } + + public void testSniffOnFailureNotInitialized() { + RestClient restClient = mock(RestClient.class); + CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + final AtomicInteger scheduleCalls = new AtomicInteger(0); + Scheduler scheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + scheduleCalls.incrementAndGet(); + return null; + } + + @Override + public void shutdown() { + } + }; + + Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + for (int i = 0; i < 10; i++) { + sniffer.sniffOnFailure(); + } + assertEquals(1, scheduleCalls.get()); + int totalRuns = hostsSniffer.runs.get(); + assertEquals(0, totalRuns); + int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); + verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + verifyNoMoreInteractions(restClient); + } + + /** + * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always + * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round + * (either afterFailure or ordinary) gets cancelled.
+ */ + public void testSniffOnFailure() throws Exception { + RestClient restClient = mock(RestClient.class); + CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + final AtomicBoolean initializing = new AtomicBoolean(true); + final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + int minNumOnFailureRounds = randomIntBetween(5, 10); + final CountDownLatch initializingLatch = new CountDownLatch(1); + final Set ordinaryRoundsTasks = new CopyOnWriteArraySet<>(); + final AtomicReference> initializingFuture = new AtomicReference<>(); + final Set onFailureTasks = new CopyOnWriteArraySet<>(); + final Set afterFailureTasks = new CopyOnWriteArraySet<>(); + final AtomicBoolean onFailureCompleted = new AtomicBoolean(false); + final CountDownLatch completionLatch = new CountDownLatch(1); + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + Scheduler scheduler = new Scheduler() { + @Override + public Future schedule(final Sniffer.Task task, long delayMillis) { + if (initializing.compareAndSet(true, false)) { + assertEquals(0L, delayMillis); + Future future = executor.submit(new Runnable() { + @Override + public void run() { + try { + task.run(); + } finally { + //we need to make sure that the sniffer is initialized, so the sniffOnFailure + //call does what it needs to do. Otherwise nothing happens until initialized. + initializingLatch.countDown(); + } + } + }); + assertTrue(initializingFuture.compareAndSet(null, future)); + return future; + } + if (delayMillis == 0L) { + Future future = executor.submit(task); + onFailureTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + if (delayMillis == sniffAfterFailureDelay) { + Future future = scheduleOrSubmit(task); + afterFailureTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + + assertEquals(sniffInterval, delayMillis); + assertEquals(sniffInterval, task.nextTaskDelay); + + if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) { + completionLatch.countDown(); + return mock(Future.class); + } + + Future future = scheduleOrSubmit(task); + ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + + private Future scheduleOrSubmit(Sniffer.Task task) { + if (randomBoolean()) { + return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS); + } else { + return executor.submit(task); + } + } + + @Override + public void shutdown() { + } + }; + final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS)); + + ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20)); + Set> onFailureFutures = new CopyOnWriteArraySet<>(); + try { + //with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped + //as another round is already running. We retry till enough runs get through as that's what we want to test. 
+ while (onFailureTasks.size() < minNumOnFailureRounds) { + onFailureFutures.add(onFailureExecutor.submit(new Runnable() { + @Override + public void run() { + sniffer.sniffOnFailure(); + } + })); + } + assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds)); + for (Future onFailureFuture : onFailureFutures) { + assertNull(onFailureFuture.get()); + } + onFailureCompleted.set(true); + } finally { + onFailureExecutor.shutdown(); + onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS); + } + + assertFalse(initializingFuture.get().isCancelled()); + assertTrue(initializingFuture.get().isDone()); + assertNull(initializingFuture.get().get()); + + assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS)); + assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds)); + assertEquals(onFailureTasks.size(), afterFailureTasks.size()); + + for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) { + assertFalse(onFailureTask.future.isCancelled()); + assertTrue(onFailureTask.future.isDone()); + assertNull(onFailureTask.future.get()); + assertTrue(onFailureTask.task.hasStarted()); + assertFalse(onFailureTask.task.isSkipped()); + } + + int cancelledTasks = 0; + int completedTasks = onFailureTasks.size() + 1; + for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) { + if (assertTaskCancelledOrCompleted(afterFailureTask)) { + completedTasks++; + } else { + cancelledTasks++; + } + } + + assertThat(ordinaryRoundsTasks.size(), greaterThan(0)); + for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) { + if (assertTaskCancelledOrCompleted(task)) { + completedTasks++; + } else { + cancelledTasks++; + } + } + assertEquals(onFailureTasks.size(), cancelledTasks); + + assertEquals(completedTasks, hostsSniffer.runs.get()); + int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); + verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + verifyNoMoreInteractions(restClient); + } finally { + executor.shutdown(); + executor.awaitTermination(1000L, TimeUnit.MILLISECONDS); + } + } + + private static boolean assertTaskCancelledOrCompleted(Sniffer.ScheduledTask task) throws ExecutionException, InterruptedException { + if (task.task.isSkipped()) { + assertTrue(task.future.isCancelled()); + try { + task.future.get(); + fail("cancellation exception should have been thrown"); + } catch(CancellationException ignore) { + } + return false; + } else { + try { + assertNull(task.future.get()); + } catch(CancellationException ignore) { + assertTrue(task.future.isCancelled()); + } + assertTrue(task.future.isDone()); + assertTrue(task.task.hasStarted()); + return true; + } + } + + public void testTaskCancelling() throws Exception { + RestClient restClient = mock(RestClient.class); + HostsSniffer hostsSniffer = mock(HostsSniffer.class); + Scheduler noOpScheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + return null; + } + + @Override + public void shutdown() { + } + }; + Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + int numIters = randomIntBetween(50, 100); + for (int i = 0; i < numIters; i++) { + Sniffer.Task task = sniffer.new Task(0L); + TaskWrapper wrapper = new TaskWrapper(task); + Future future; + if (rarely()) { + future = executor.schedule(wrapper, 
randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS); + } else { + future = executor.submit(wrapper); + } + Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future); + boolean skip = scheduledTask.skip(); + try { + assertNull(future.get()); + } catch(CancellationException ignore) { + assertTrue(future.isCancelled()); + } + + if (skip) { + //the task was either cancelled before starting, in which case it will never start (thanks to Future#cancel), + //or skipped, in which case it will run but do nothing (thanks to Task#skip). + //Here we want to make sure that whenever skip returns true, the task either won't run or it won't do anything, + //otherwise we may end up with parallel sniffing tracks given that each task schedules the following one. We need to + // make sure that onFailure takes scheduling over while at the same time ordinary rounds don't go on. + assertFalse(task.hasStarted()); + assertTrue(task.isSkipped()); + assertTrue(future.isCancelled()); + assertTrue(future.isDone()); + } else { + //if a future is cancelled when its execution has already started, future#get throws CancellationException before + //completion. The execution continues though so we use a latch to try and wait for the task to be completed. + //Here we want to make sure that whenever skip returns false, the task will be completed, otherwise we may + //fail to schedule the following round, which means no sniffing will ever happen again besides on-failure sniffing. + assertTrue(wrapper.await()); + //the future may or may not be cancelled but the task has definitely started and completed + assertTrue(task.toString(), task.hasStarted()); + assertFalse(task.isSkipped()); + assertTrue(future.isDone()); + } + //subsequent cancel calls return false for sure + int cancelCalls = randomIntBetween(1, 10); + for (int j = 0; j < cancelCalls; j++) { + assertFalse(scheduledTask.skip()); + } + } + } finally { + executor.shutdown(); + executor.awaitTermination(1000, TimeUnit.MILLISECONDS); + } + } + + /** + * Wraps a {@link Sniffer.Task} and makes it possible to wait for its completion. This is needed to verify + * that tasks are either never started or always completed. Calling {@link Future#get()} against a cancelled future will + * throw {@link CancellationException} straight-away but the execution of the task will continue if it had already started, + * in which case {@link Future#cancel(boolean)} returns true which is not very helpful. + */ + private static final class TaskWrapper implements Runnable { + final Sniffer.Task task; + final CountDownLatch completionLatch = new CountDownLatch(1); + + TaskWrapper(Sniffer.Task task) { + this.task = task; + } + + @Override + public void run() { + try { + task.run(); + } finally { + completionLatch.countDown(); + } + } + + boolean await() throws InterruptedException { + return completionLatch.await(1000, TimeUnit.MILLISECONDS); + } + } + + /** + * Mock {@link HostsSniffer} implementation used for testing, which most of the time returns a fixed host. + * It rarely throws an exception or returns an empty list of hosts, to make sure that such situations are properly handled. + * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run + * at a given point in time.
+ */ + private static class CountingHostsSniffer implements HostsSniffer { + private final AtomicInteger runs = new AtomicInteger(0); + private final AtomicInteger failures = new AtomicInteger(0); + private final AtomicInteger emptyList = new AtomicInteger(0); + + @Override + public List sniffHosts() throws IOException { + int run = runs.incrementAndGet(); + if (rarely()) { + failures.incrementAndGet(); + //check that if communication breaks, sniffer keeps on working + throw new IOException("communication breakdown"); + } + if (rarely()) { + emptyList.incrementAndGet(); + return Collections.emptyList(); + } + return buildHosts(run); + } + + private static List buildHosts(int run) { + int size = run % 5 + 1; + assert size > 0; + List hosts = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + hosts.add(new HttpHost("sniffed-" + run, 9200 + i)); + } + return hosts; + } + } + + @SuppressWarnings("unchecked") + public void testDefaultSchedulerSchedule() { + RestClient restClient = mock(RestClient.class); + HostsSniffer hostsSniffer = mock(HostsSniffer.class); + Scheduler noOpScheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + return mock(Future.class); + } + + @Override + public void shutdown() { + + } + }; + Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE)); + + ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class); + final ScheduledFuture mockedFuture = mock(ScheduledFuture.class); + when(scheduledExecutorService.schedule(any(Runnable.class), any(Long.class), any(TimeUnit.class))) + .then(new Answer>() { + @Override + public ScheduledFuture answer(InvocationOnMock invocationOnMock) { + return mockedFuture; + } + }); + DefaultScheduler scheduler = new DefaultScheduler(scheduledExecutorService); + long delay = randomLongBetween(1, Long.MAX_VALUE); + Future future = scheduler.schedule(task, delay); + assertSame(mockedFuture, future); + verify(scheduledExecutorService).schedule(task, delay, TimeUnit.MILLISECONDS); + verifyNoMoreInteractions(scheduledExecutorService, mockedFuture); + } + + public void testDefaultSchedulerThreadFactory() { + DefaultScheduler defaultScheduler = new DefaultScheduler(); + try { + ScheduledExecutorService executorService = defaultScheduler.executor; + assertThat(executorService, instanceOf(ScheduledThreadPoolExecutor.class)); + ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) executorService; + assertTrue(executor.getRemoveOnCancelPolicy()); + assertFalse(executor.getContinueExistingPeriodicTasksAfterShutdownPolicy()); + assertTrue(executor.getExecuteExistingDelayedTasksAfterShutdownPolicy()); + assertThat(executor.getThreadFactory(), instanceOf(Sniffer.SnifferThreadFactory.class)); + int iters = randomIntBetween(3, 10); + for (int i = 1; i <= iters; i++) { + Thread thread = executor.getThreadFactory().newThread(new Runnable() { + @Override + public void run() { + + } + }); + assertThat(thread.getName(), equalTo("es_rest_client_sniffer[T#" + i + "]")); + assertThat(thread.isDaemon(), is(true)); + } + } finally { + defaultScheduler.shutdown(); + } + } + + public void testDefaultSchedulerShutdown() throws Exception { + ScheduledThreadPoolExecutor executor = mock(ScheduledThreadPoolExecutor.class); + DefaultScheduler defaultScheduler = new DefaultScheduler(executor); +
defaultScheduler.shutdown(); + verify(executor).shutdown(); + verify(executor).awaitTermination(1000, TimeUnit.MILLISECONDS); + verify(executor).shutdownNow(); + verifyNoMoreInteractions(executor); + + when(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)).thenReturn(true); + defaultScheduler.shutdown(); + verify(executor, times(2)).shutdown(); + verify(executor, times(2)).awaitTermination(1000, TimeUnit.MILLISECONDS); + verifyNoMoreInteractions(executor); + } +} diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index ae4e6a431c977..c1097b68b898f 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -106,15 +106,23 @@ tasks.withType(AbstractArchiveTask) { baseName = "elasticsearch${ subdir.contains('oss') ? '-oss' : ''}" } +Closure commonZipConfig = { + dirMode 0755 + fileMode 0644 +} + task buildIntegTestZip(type: Zip) { + configure(commonZipConfig) with archiveFiles(transportModulesFiles, 'zip', false) } task buildZip(type: Zip) { + configure(commonZipConfig) with archiveFiles(modulesFiles(false), 'zip', false) } task buildOssZip(type: Zip) { + configure(commonZipConfig) with archiveFiles(modulesFiles(true), 'zip', true) } diff --git a/docs/README.asciidoc b/docs/README.asciidoc index 2963359d44c28..766aeae0c5d94 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -6,7 +6,9 @@ See: https://github.com/elastic/docs Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested by the command `gradle :docs:check`. To test just the docs from a single page, -use e.g. `gradle :docs:check -Dtests.method="*rollover*"`. +use e.g. `gradle :docs:check -Dtests.method="\*rollover*"`. + +NOTE: If you have an elasticsearch-extra folder alongside your elasticsearch folder, you must temporarily rename it when you are testing 6.3 or later branches. By default each `// CONSOLE` snippet runs as its own isolated test. 
You can manipulate the test execution in the following ways: diff --git a/docs/java-rest/high-level/indices/flush_synced.asciidoc b/docs/java-rest/high-level/indices/flush_synced.asciidoc new file mode 100644 index 0000000000000..65afaa533a640 --- /dev/null +++ b/docs/java-rest/high-level/indices/flush_synced.asciidoc @@ -0,0 +1,91 @@ +[[java-rest-high-flush-synced]] +=== Flush Synced API + +[[java-rest-high-flush-synced-request]] +==== Flush Synced Request + +A `SyncedFlushRequest` can be applied to one or more indices, or even on `_all` the indices: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-request] +-------------------------------------------------- +<1> Flush synced one index +<2> Flush synced multiple indices +<3> Flush synced all the indices + +==== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-flush-synced-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute] +-------------------------------------------------- + +[[java-rest-high-flush-synced-async]] +==== Asynchronous Execution + +The asynchronous execution of a flush request requires both the `SyncedFlushRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute-async] +-------------------------------------------------- +<1> The `SyncedFlushRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `SyncedFlushResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-flush-synced-response]] +==== Flush Synced Response + +The returned `SyncedFlushResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-response] +-------------------------------------------------- +<1> Total number of shards hit by the flush request +<2> Number of shards where the flush has succeeded +<3> Number of shards where the flush has failed +<4> Name of the index whose results we are about to calculate. +<5> Total number of shards for index mentioned in 4. +<6> Successful shards for index mentioned in 4. +<7> Failed shards for index mentioned in 4. +<8> One of the failed shard ids of the failed index mentioned in 4. +<9> Reason for failure of copies of the shard mentioned in 8. +<10> JSON represented by a Map. Contains shard-related information like id, state, version etc. +for the failed shard copies. If the entire shard failed then this returns an empty map. + +By default, if the indices were not found, an `ElasticsearchException` will be thrown: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-notfound] +-------------------------------------------------- +<1> Do something if the indices to be flushed were not found diff --git a/docs/java-rest/high-level/snapshot/verify_repository.asciidoc b/docs/java-rest/high-level/snapshot/verify_repository.asciidoc new file mode 100644 index 0000000000000..4f03d1e5fe382 --- /dev/null +++ b/docs/java-rest/high-level/snapshot/verify_repository.asciidoc @@ -0,0 +1,81 @@ +[[java-rest-high-snapshot-verify-repository]] +=== Snapshot Verify Repository API + +The Snapshot Verify Repository API allows you to verify a registered repository.
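+Before walking through the individual pieces below, a minimal end-to-end
+sketch may help. Note that the repository name, the `client` variable and the
+zero-argument call shown here are assumptions for illustration only; they are
+not taken from the documentation tests, and exact signatures may differ
+between client versions:
+
+["source","java"]
+--------------------------------------------------
+// hypothetical usage sketch: verify a previously registered repository;
+// "my_repository" and the high-level REST client instance are assumed
+VerifyRepositoryRequest request = new VerifyRepositoryRequest("my_repository");
+VerifyRepositoryResponse response = client.snapshot().verifyRepository(request);
+--------------------------------------------------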
+ +[[java-rest-high-snapshot-verify-repository-request]] +==== Snapshot Verify Repository Request + +A `VerifyRepositoryRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request] +-------------------------------------------------- + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the settings were applied +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the settings were applied +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-snapshot-verify-repository-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute] +-------------------------------------------------- + +[[java-rest-high-snapshot-verify-repository-async]] +==== Asynchronous Execution + +The asynchronous execution of a snapshot verify repository request requires both the +`VerifyRepositoryRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute-async] +-------------------------------------------------- +<1> The `VerifyRepositoryRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `VerifyRepositoryResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure.
The raised exception is provided as an argument + +[[java-rest-high-snapshot-verify-repository-response]] +==== Snapshot Verify Repository Response + +The returned `VerifyRepositoryResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-response] +-------------------------------------------------- \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index b04cbb8df79b7..ff1e03afe3ec3 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -67,6 +67,7 @@ Index Management:: * <> * <> * <> +* <> * <> * <> * <> @@ -89,6 +90,7 @@ include::indices/shrink_index.asciidoc[] include::indices/split_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] +include::indices/flush_synced.asciidoc[] include::indices/clear_cache.asciidoc[] include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] @@ -104,11 +106,9 @@ include::indices/put_template.asciidoc[] The Java High Level REST Client supports the following Cluster APIs: * <> -* <> * <> include::cluster/put_settings.asciidoc[] -include::cluster/list_tasks.asciidoc[] include::cluster/put_pipeline.asciidoc[] == Snapshot APIs @@ -118,7 +118,17 @@ The Java High Level REST Client supports the following Snapshot APIs: * <> * <> * <> +* <> include::snapshot/get_repository.asciidoc[] include::snapshot/create_repository.asciidoc[] include::snapshot/delete_repository.asciidoc[] +include::snapshot/verify_repository.asciidoc[] + +== Tasks APIs + +The Java High Level REST Client supports the following Tasks APIs: + +* <> + +include::tasks/list_tasks.asciidoc[] diff --git a/docs/java-rest/high-level/cluster/list_tasks.asciidoc b/docs/java-rest/high-level/tasks/list_tasks.asciidoc similarity index 79% rename from docs/java-rest/high-level/cluster/list_tasks.asciidoc rename to docs/java-rest/high-level/tasks/list_tasks.asciidoc index 1a2117b2e66e6..e60ca61247e74 100644 --- a/docs/java-rest/high-level/cluster/list_tasks.asciidoc +++ b/docs/java-rest/high-level/tasks/list_tasks.asciidoc @@ -1,4 +1,4 @@ -[[java-rest-high-cluster-list-tasks]] +[[java-rest-high-tasks-list]] === List Tasks API The List Tasks API allows to get information about the tasks currently executing in the cluster. @@ -10,7 +10,7 @@ A `ListTasksRequest`: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-request] -------------------------------------------------- There is no required parameters. By default the client will list all tasks and will not wait for task completion. @@ -19,7 +19,7 @@ for task completion.
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-filter] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-request-filter] -------------------------------------------------- <1> Request only cluster-related tasks <2> Request all tasks running on nodes nodeId1 and nodeId2 @@ -27,13 +27,13 @@ include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-detailed] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-request-detailed] -------------------------------------------------- <1> Should the information include detailed, potentially slow to generate data. Defaults to `false` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-wait-completion] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-request-wait-completion] -------------------------------------------------- <1> Should this request wait for all found tasks to complete. Defaults to `false` <2> Timeout for the request as a `TimeValue`. Applicable only if `setWaitForCompletion` is `true`. @@ -45,7 +45,7 @@ Defaults to 30 seconds ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-execute] -------------------------------------------------- [[java-rest-high-cluster-list-tasks-async]] @@ -57,7 +57,7 @@ passed to the asynchronous method: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-async] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-execute-async] -------------------------------------------------- <1> The `ListTasksRequest` to execute and the `ActionListener` to use when the execution completes @@ -71,7 +71,7 @@ A typical listener for `ListTasksResponse` looks like: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-listener] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-execute-listener] -------------------------------------------------- <1> Called when the execution is successfully completed. 
The response is provided as an argument @@ -82,20 +82,20 @@ provided as an argument ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-tasks] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-tasks] -------------------------------------------------- <1> List of currently running tasks ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-calc] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-calc] -------------------------------------------------- <1> List of tasks grouped by a node <2> List of tasks grouped by a parent task ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-failures] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-failures] -------------------------------------------------- <1> List of node failures <2> List of tasks failures diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-debugging.asciidoc index a909593ff1745..8523116616d18 100644 --- a/docs/painless/painless-debugging.asciidoc +++ b/docs/painless/painless-debugging.asciidoc @@ -48,7 +48,7 @@ Which shows that the class of `doc.first` is "java_class": "org.elasticsearch.index.fielddata.ScriptDocValues$Longs", ... }, - "status": 500 + "status": 400 } --------------------------------------------------------- // TESTRESPONSE[s/\.\.\./"script_stack": $body.error.script_stack, "script": $body.error.script, "lang": $body.error.lang, "caused_by": $body.error.caused_by, "root_cause": $body.error.root_cause, "reason": $body.error.reason/] diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 688cf20c5320f..019094cfa3fe2 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -348,6 +348,34 @@ GET /_search \... will sort the composite bucket in descending order when comparing values from the `date_histogram` source and in ascending order when comparing values from the `terms` source. +====== Missing bucket + +By default documents without a value for a given source are ignored. +It is possible to include them in the response by setting `missing_bucket` to +`true` (defaults to `false`): + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "my_buckets": { + "composite" : { + "sources" : [ + { "product_name": { "terms" : { "field": "product", "missing_bucket": true } } } + ] + } + } + } +} +-------------------------------------------------- +// CONSOLE + +In the example above the source `product_name` will emit an explicit `null` value +for documents without a value for the field `product`. +The `order` specified in the source dictates whether the `null` values should rank +first (ascending order, `asc`) or last (descending order, `desc`). + ==== Size The `size` parameter can be set to define how many composite buckets should be returned. 
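+Returning to the `missing_bucket` option described above: for readers using the
+Java API, the same source can be sketched roughly as follows. This is a hedged
+illustration (imports and the surrounding search request omitted) that assumes
+the `missingBucket` setter exposed by the composite value source builders; it
+is not a snippet from the reference tests:
+
+["source","java"]
+--------------------------------------------------
+// emit an explicit null bucket for documents without a value for "product"
+TermsValuesSourceBuilder productName = new TermsValuesSourceBuilder("product_name")
+    .field("product")
+    .missingBucket(true);
+CompositeAggregationBuilder composite =
+    new CompositeAggregationBuilder("my_buckets", Collections.singletonList(productName));
+--------------------------------------------------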
diff --git a/docs/reference/aggregations/metrics/min-aggregation.asciidoc b/docs/reference/aggregations/metrics/min-aggregation.asciidoc
index 9eaa8a7372688..4225beff3f777 100644
--- a/docs/reference/aggregations/metrics/min-aggregation.asciidoc
+++ b/docs/reference/aggregations/metrics/min-aggregation.asciidoc
@@ -46,7 +46,7 @@ response.
 
 ==== Script
 
-The `min` aggregation can also calculate the maximum of a script. The example
+The `min` aggregation can also calculate the minimum of a script. The example
 below computes the minimum price:
 
 [source,js]
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 59f8d77a0c086..8567ed63b3418 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -26,6 +26,7 @@ include::{xes-repo-dir}/settings/configuring-xes.asciidoc[]
 
 include::{xes-repo-dir}/setup/bootstrap-checks-xes.asciidoc[]
 
+:edit_url:
 include::upgrade.asciidoc[]
 
 include::migration/index.asciidoc[]
@@ -66,6 +67,7 @@ include::{xes-repo-dir}/rest-api/index.asciidoc[]
 
 include::{xes-repo-dir}/commands/index.asciidoc[]
 
+:edit_url:
 include::how-to.asciidoc[]
 
 include::testing.asciidoc[]
diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc
index 069e50fc79ac7..988a2ada38d7e 100644
--- a/docs/reference/mapping/types/text.asciidoc
+++ b/docs/reference/mapping/types/text.asciidoc
@@ -89,7 +89,7 @@ The following parameters are accepted by `text` fields:
     What information should be stored in the index, for search and highlighting
     purposes. Defaults to `positions`.
 
-<<index-prefix-config,`index_prefix`>>::
+<<index-prefix-config,`index_prefixes`>>::
 
     If enabled, term prefixes of between 2 and 5 characters are indexed into a
     separate field. This allows prefix searches to run more efficiently, at
@@ -138,7 +138,7 @@ The following parameters are accepted by `text` fields:
 [[index-prefix-config]]
 ==== Index Prefix configuration
 
-Text fields may also index term prefixes to speed up prefix searches. The `index_prefix`
+Text fields may also index term prefixes to speed up prefix searches. The `index_prefixes`
 parameter is configured as below. Either or both of `min_chars` and `max_chars` may be
 excluded. Both values are treated as inclusive
 
@@ -151,7 +151,7 @@ PUT my_index
       "properties": {
         "full_name": {
           "type": "text",
-          "index_prefix" : {
+          "index_prefixes" : {
             "min_chars" : 1, <1>
             "max_chars" : 10 <2>
           }
diff --git a/docs/reference/migration/migrate_7_0/aggregations.asciidoc b/docs/reference/migration/migrate_7_0/aggregations.asciidoc
index 5241ba4ccc76c..2f947cbe2bfcf 100644
--- a/docs/reference/migration/migrate_7_0/aggregations.asciidoc
+++ b/docs/reference/migration/migrate_7_0/aggregations.asciidoc
@@ -9,4 +9,9 @@ These `execution_hint` are removed and should be replaced by `global_ordinals`.
 
 The dynamic cluster setting named `search.max_buckets` now defaults
 to 10,000 (instead of unlimited in the previous version).
-Requests that try to return more than the limit will fail with an exception.
\ No newline at end of file
+Requests that try to return more than the limit will fail with an exception.
+
+==== `missing` option of the `composite` aggregation has been removed
+
+The `missing` option of the `composite` aggregation, deprecated in 6.x,
+has been removed. `missing_bucket` should be used instead.
\ No newline at end of file
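To go with this migration note, a hedged sketch of the corresponding Java-side change, assuming the deprecated per-source `missing(...)` setter that this PR removes and its `missing_bucket` replacement on the composite source builders (names are illustrative):

[source,java]
--------------------------------------------------
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;

public class CompositeMissingMigration {
    public static TermsValuesSourceBuilder productNameSource() {
        // 6.x (deprecated, removed in 7.0): substitute a placeholder value for
        // documents without the field, e.g. .missing("N/A") on the source builder.
        // 7.0: request an explicit null bucket instead.
        return new TermsValuesSourceBuilder("product_name")
                .field("product")
                .missingBucket(true);
    }
}
--------------------------------------------------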
diff --git a/docs/reference/migration/migrate_7_0/scripting.asciidoc b/docs/reference/migration/migrate_7_0/scripting.asciidoc
index df43aaa92eadf..79380f84204ed 100644
--- a/docs/reference/migration/migrate_7_0/scripting.asciidoc
+++ b/docs/reference/migration/migrate_7_0/scripting.asciidoc
@@ -11,3 +11,9 @@ the getter methods for date objects were deprecated. These methods have
 now been removed. Instead, use `.value` on `date` fields, or explicitly
 parse `long` fields into a date object using
 `Instant.ofEpochMilli(doc["myfield"].value)`.
+
+==== Script errors will return as `400` error codes
+
+Malformed scripts, whether in search templates, ingest pipelines, or search
+requests, now return `400 - Bad request` where they previously returned
+`500 - Internal Server Error`. This also applies to stored scripts.
diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc
index 1fe0bc62418bb..7505b6f14d1b4 100644
--- a/docs/reference/migration/migrate_7_0/search.asciidoc
+++ b/docs/reference/migration/migrate_7_0/search.asciidoc
@@ -43,7 +43,7 @@ The Search API returns `400 - Bad request` while it would previously return
 * the number of slices is too large
 * keep alive for scroll is too large
 * number of filters in the adjacency matrix aggregation is too large
-
+* script compilation errors
 
 ==== Scroll queries cannot use the `request_cache` anymore
 
diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc
index 9226c29b9ad6e..2b522062ec06c 100644
--- a/docs/reference/search/suggesters/context-suggest.asciidoc
+++ b/docs/reference/search/suggesters/context-suggest.asciidoc
@@ -84,6 +84,10 @@ PUT place_path_category
 NOTE: Adding context mappings increases the index size of the completion field. The completion
 index is entirely heap resident, so you can monitor the completion field index size using
 <<indices-stats>>.
 
+NOTE: deprecated[7.0.0, Indexing a suggestion without context on a context-enabled completion field is deprecated
+and will be removed in the next major release. If you want to index a suggestion that matches all contexts you should
+add a special context for it.]
+
 [[suggester-context-category]]
 [float]
 ==== Category Context
@@ -156,9 +160,9 @@ POST place/_search?pretty
 // CONSOLE
 // TEST[continued]
 
-NOTE: When no categories are provided at query-time, all indexed documents are considered.
-Querying with no categories on a category enabled completion field should be avoided, as it
-will degrade search performance.
+NOTE: deprecated[7.0.0, When no categories are provided at query-time, all indexed documents are considered.
+Querying with no categories on a category-enabled completion field is deprecated and will be removed in the next major release
+as it degrades search performance considerably.]
 
 Suggestions with certain categories can be boosted higher than others.
 The following filters suggestions by categories and additionally boosts
diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc
index 107d20f1135ce..abd40336e9b08 100644
--- a/docs/reference/upgrade/disable-shard-alloc.asciidoc
+++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc
@@ -1,8 +1,10 @@
-When you shut down a node, the allocation process waits for one minute
-before starting to replicate the shards on that node to other nodes
-in the cluster, causing a lot of wasted I/O.
You can avoid racing the clock -by disabling allocation before shutting down the node: +When you shut down a node, the allocation process waits for +`index.unassigned.node_left.delayed_timeout` (by default, one minute) before +starting to replicate the shards on that node to other nodes in the cluster, +which can involve a lot of I/O. Since the node is shortly going to be +restarted, this I/O is unnecessary. You can avoid racing the clock by disabling +allocation before shutting down the node: [source,js] -------------------------------------------------- diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 02a4197fba94a..69c8afb3e2fc6 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -193,6 +193,7 @@ public Map> getTokenizers() { tokenizers.put("pattern", PatternTokenizerFactory::new); tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); tokenizers.put("whitespace", WhitespaceTokenizerFactory::new); + tokenizers.put("keyword", KeywordTokenizerFactory::new); return tokenizers; } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java similarity index 89% rename from server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java index 1d94cad150785..abe88462cb996 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; @@ -30,7 +30,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory { private final int bufferSize; - public KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); bufferSize = settings.getAsInt("buffer_size", 256); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index 7deadcbcc25f6..5084306587847 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory; import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory; import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory; -import org.elasticsearch.index.analysis.KeywordTokenizerFactory; import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory; import org.elasticsearch.index.analysis.SynonymTokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase; @@ -56,6 +55,7 @@ protected Map> getTokenizers() { tokenizers.put("pattern", PatternTokenizerFactory.class); tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class); tokenizers.put("whitespace", WhitespaceTokenizerFactory.class); + tokenizers.put("keyword", KeywordTokenizerFactory.class); return tokenizers; } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index cffd4496f1fb7..9a7c158fc4734 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -5,9 +5,22 @@ indices.analyze: body: text: Foo Bar! + explain: true tokenizer: keyword - - length: { tokens: 1 } - - match: { tokens.0.token: Foo Bar! } + - length: { detail.tokenizer.tokens: 1 } + - match: { detail.tokenizer.name: keyword } + - match: { detail.tokenizer.tokens.0.token: Foo Bar! } + + - do: + indices.analyze: + body: + text: Foo Bar! + explain: true + tokenizer: + type: keyword + - length: { detail.tokenizer.tokens: 1 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: Foo Bar! 
} --- "nGram": diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 1737d743a6d1c..936736e93de93 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -97,3 +97,19 @@ - length: { tokens: 2 } - match: { tokens.0.token: sha } - match: { tokens.1.token: hay } + +--- +"Custom normalizer in request": + - do: + indices.analyze: + body: + text: ABc + explain: true + filter: ["lowercase"] + + - length: { detail.tokenizer.tokens: 1 } + - length: { detail.tokenfilters.0.tokens: 1 } + - match: { detail.tokenizer.name: keyword_for_normalizer } + - match: { detail.tokenizer.tokens.0.token: ABc } + - match: { detail.tokenfilters.0.name: lowercase } + - match: { detail.tokenfilters.0.tokens.0.token: abc } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index c68f498c0eaf1..5204a07b1c969 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -52,8 +52,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; -public class GrokProcessorGetAction extends Action { +public class GrokProcessorGetAction extends Action { public static final GrokProcessorGetAction INSTANCE = new GrokProcessorGetAction(); public static final String NAME = "cluster:admin/ingest/processor/grok/get"; @@ -62,11 +61,6 @@ private GrokProcessorGetAction() { super(NAME); } - @Override - public RequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new RequestBuilder(client); - } - @Override public Response newResponse() { return new Response(null); @@ -79,7 +73,7 @@ public ActionRequestValidationException validate() { } } - public static class RequestBuilder extends ActionRequestBuilder { + public static class RequestBuilder extends ActionRequestBuilder { public RequestBuilder(ElasticsearchClient client) { super(client, GrokProcessorGetAction.INSTANCE, new Request()); } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..3bbaa2ba0a715 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +98c920972b2f5e8563540e805d87e6a3bc888972 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index 8222106897b18..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e28b448387ec05d655f8c81ee54e13ff2975a4d \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java index 
99eb9709f7333..21b9a11e1f214 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java @@ -20,10 +20,8 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; -public class MultiSearchTemplateAction - extends Action { +public class MultiSearchTemplateAction extends Action { public static final MultiSearchTemplateAction INSTANCE = new MultiSearchTemplateAction(); public static final String NAME = "indices:data/read/msearch/template"; @@ -36,9 +34,4 @@ private MultiSearchTemplateAction() { public MultiSearchTemplateResponse newResponse() { return new MultiSearchTemplateResponse(); } - - @Override - public MultiSearchTemplateRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new MultiSearchTemplateRequestBuilder(client, this); - } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestBuilder.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestBuilder.java index 4ef6c593d9a41..c4dac0dd88eb8 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestBuilder.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestBuilder.java @@ -24,7 +24,7 @@ import org.elasticsearch.client.ElasticsearchClient; public class MultiSearchTemplateRequestBuilder - extends ActionRequestBuilder { + extends ActionRequestBuilder { protected MultiSearchTemplateRequestBuilder(ElasticsearchClient client, MultiSearchTemplateAction action) { super(client, action, new MultiSearchTemplateRequest()); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 37d72ac4c0d04..b5bc86679aed4 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -20,12 +20,14 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -107,12 +109,14 @@ public Exception getFailure() { } private Item[] items; - + private long tookInMillis; + MultiSearchTemplateResponse() { } - public MultiSearchTemplateResponse(Item[] items) { + public MultiSearchTemplateResponse(Item[] items, long tookInMillis) { this.items = items; + this.tookInMillis = tookInMillis; } @Override @@ -126,6 +130,13 @@ public Iterator iterator() { public Item[] getResponses() { return this.items; } + + /** + * How long the msearch_template took. 
+ */ + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } @Override public void readFrom(StreamInput in) throws IOException { @@ -134,6 +145,9 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = Item.readItem(in); } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + tookInMillis = in.readVLong(); + } } @Override @@ -143,11 +157,15 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeVLong(tookInMillis); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); + builder.field("took", tookInMillis); builder.startArray(Fields.RESPONSES); for (Item item : items) { if (item.isFailure()) { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java index 5a0b2e15460c5..b739fc1cfd087 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java @@ -19,9 +19,9 @@ package org.elasticsearch.script.mustache; import com.github.mustachejava.Mustache; +import com.github.mustachejava.MustacheException; import com.github.mustachejava.MustacheFactory; -import java.io.StringReader; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -31,12 +31,15 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.TemplateScript; import java.io.Reader; +import java.io.StringReader; import java.io.StringWriter; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.Collections; import java.util.Map; /** @@ -66,9 +69,14 @@ public T compile(String templateName, String templateSource, ScriptContext new MustacheExecutableScript(template, params); - return context.factoryClazz.cast(compiled); + try { + Mustache template = factory.compile(reader, "query-template"); + TemplateScript.Factory compiled = params -> new MustacheExecutableScript(template, params); + return context.factoryClazz.cast(compiled); + } catch (MustacheException ex) { + throw new ScriptException(ex.getMessage(), ex, Collections.emptyList(), templateSource, NAME); + } + } private CustomMustacheFactory createMustacheFactory(Map options) { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java index 2982fbd70c645..1246e8e8e9bf2 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java @@ -20,9 +20,8 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.action.Action; -import org.elasticsearch.client.ElasticsearchClient; -public class SearchTemplateAction extends Action { +public class SearchTemplateAction extends Action { public static final 
SearchTemplateAction INSTANCE = new SearchTemplateAction(); public static final String NAME = "indices:data/read/search/template"; @@ -31,11 +30,6 @@ private SearchTemplateAction() { super(NAME); } - @Override - public SearchTemplateRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new SearchTemplateRequestBuilder(client, this); - } - @Override public SearchTemplateResponse newResponse() { return new SearchTemplateResponse(); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java index 02d27ac79fc3d..054acbc655768 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequestBuilder.java @@ -27,7 +27,7 @@ import java.util.Map; public class SearchTemplateRequestBuilder - extends ActionRequestBuilder { + extends ActionRequestBuilder { SearchTemplateRequestBuilder(ElasticsearchClient client, SearchTemplateAction action) { super(client, action, new SearchTemplateRequest()); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index 167190d8f5d04..15cfc626fba3b 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -94,7 +94,7 @@ protected void doExecute(MultiSearchTemplateRequest request, ActionListener compile("{{#toJson}}{{foo}}{{bar}}{{/toJson}}")); + final String script = "{{#toJson}}{{foo}}{{bar}}{{/toJson}}"; + ScriptException e = expectThrows(ScriptException.class, () -> compile(script)); assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier")); + assertEquals(MustacheScriptEngine.NAME, e.getLang()); + assertEquals(script, e.getScript()); - e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{/toJson}}")); + final String script2 = "{{#toJson}}{{/toJson}}"; + e = expectThrows(ScriptException.class, () -> compile(script2)); assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier")); + assertEquals(MustacheScriptEngine.NAME, e.getLang()); + assertEquals(script2, e.getScript()); } public void testEmbeddedToJSON() throws Exception { @@ -312,11 +318,17 @@ public void testJoinWithToJson() { } public void testsUnsupportedTagsJoin() { - MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#join}}{{/join}}")); + final String script = "{{#join}}{{/join}}"; + ScriptException e = expectThrows(ScriptException.class, () -> compile(script)); assertThat(e.getMessage(), containsString("Mustache function [join] must contain one and only one identifier")); + assertEquals(MustacheScriptEngine.NAME, e.getLang()); + assertEquals(script, e.getScript()); - e = expectThrows(MustacheException.class, () -> compile("{{#join delimiter='a'}}{{/join delimiter='b'}}")); + final String script2 = "{{#join delimiter='a'}}{{/join delimiter='b'}}"; + e = expectThrows(ScriptException.class, () -> compile(script2)); assertThat(e.getMessage(), containsString("Mismatched start/end 
tags")); + assertEquals(MustacheScriptEngine.NAME, e.getLang()); + assertEquals(script2, e.getScript()); } public void testJoinWithCustomDelimiter() { diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index fe2fedf62b559..884e26e7df855 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -198,6 +198,7 @@ public void testIndexedTemplateClient() throws Exception { getResponse = client().admin().cluster().prepareGetStoredScript("testTemplate").get(); assertNull(getResponse.getSource()); + assertWarnings("the template context is now deprecated. Specify templates in a \"script\" element."); } public void testIndexedTemplate() throws Exception { @@ -267,6 +268,7 @@ public void testIndexedTemplate() throws Exception { .setScript("2").setScriptType(ScriptType.STORED).setScriptParams(templateParams) .get(); assertHitCount(searchResponse.getResponse(), 1); + assertWarnings("the template context is now deprecated. Specify templates in a \"script\" element."); } // Relates to #10397 @@ -311,6 +313,7 @@ public void testIndexedTemplateOverwrite() throws Exception { .get(); assertHitCount(searchResponse.getResponse(), 1); } + assertWarnings("the template context is now deprecated. Specify templates in a \"script\" element."); } public void testIndexedTemplateWithArray() throws Exception { @@ -339,6 +342,7 @@ public void testIndexedTemplateWithArray() throws Exception { .setScript("4").setScriptType(ScriptType.STORED).setScriptParams(arrayTemplateParams) .get(); assertHitCount(searchResponse.getResponse(), 5); + assertWarnings("the template context is now deprecated. 
Specify templates in a \"script\" element."); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java index aa650a37c4fa2..f91d349f80657 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java @@ -62,8 +62,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; -public class PainlessExecuteAction extends Action { +public class PainlessExecuteAction extends Action { static final PainlessExecuteAction INSTANCE = new PainlessExecuteAction(); private static final String NAME = "cluster:admin/scripts/painless/execute"; @@ -72,11 +71,6 @@ private PainlessExecuteAction() { super(NAME); } - @Override - public RequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new RequestBuilder(client); - } - @Override public Response newResponse() { return new Response(); @@ -201,7 +195,7 @@ public static SupportedContext fromId(byte id) { } - public static class RequestBuilder extends ActionRequestBuilder { + public static class RequestBuilder extends ActionRequestBuilder { RequestBuilder(ElasticsearchClient client) { super(client, INSTANCE, new Request()); diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/16_update2.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/16_update2.yml index 3d6f603d4d806..253676bda8e38 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/16_update2.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/16_update2.yml @@ -35,7 +35,7 @@ id: "non_existing" - do: - catch: request + catch: bad_request put_script: id: "1" context: "search" diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml index e498a1737576e..02c17ce0e3714 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml @@ -133,7 +133,7 @@ setup: --- "Scripted Field with script error": - do: - catch: request + catch: bad_request search: body: script_fields: diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java index 5b0158ff55b5f..30e18ae6d6823 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java @@ -163,14 +163,6 @@ public Query existsQuery(QueryShardContext context) { return new TermQuery(new Term("_feature", name())); } - @Override - public Query nullValueQuery() { - if (nullValue() == null) { - return null; - } - return termQuery(nullValue(), null); - } - @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { failIfNoDocValues(); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java index 8908fbdfbddd8..441cbb5fac108 100644 --- 
a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java
@@ -20,12 +20,11 @@ package org.elasticsearch.index.rankeval;
 
 import org.elasticsearch.action.Action;
-import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  * Action for evaluating search ranking results.
  */
-public class RankEvalAction extends Action<RankEvalRequest, RankEvalResponse, RankEvalRequestBuilder> {
+public class RankEvalAction extends Action<RankEvalRequest, RankEvalResponse> {
 
     public static final RankEvalAction INSTANCE = new RankEvalAction();
     public static final String NAME = "indices:data/read/rank_eval";
@@ -34,11 +33,6 @@ private RankEvalAction() {
         super(NAME);
     }
 
-    @Override
-    public RankEvalRequestBuilder newRequestBuilder(ElasticsearchClient client) {
-        return new RankEvalRequestBuilder(client, this, new RankEvalRequest());
-    }
-
     @Override
     public RankEvalResponse newResponse() {
         return new RankEvalResponse();
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java
index 2df16ade5664d..146c987eff0ac 100644
--- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java
@@ -23,9 +23,9 @@ import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
-public class RankEvalRequestBuilder extends ActionRequestBuilder<RankEvalRequest, RankEvalResponse, RankEvalRequestBuilder> {
+public class RankEvalRequestBuilder extends ActionRequestBuilder<RankEvalRequest, RankEvalResponse> {
 
-    public RankEvalRequestBuilder(ElasticsearchClient client, Action<RankEvalRequest, RankEvalResponse, RankEvalRequestBuilder> action,
+    public RankEvalRequestBuilder(ElasticsearchClient client, Action<RankEvalRequest, RankEvalResponse> action,
             RankEvalRequest request) {
         super(client, action, request);
     }
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java
index c9ff39bbd118a..a5597873103bc 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java
@@ -38,7 +38,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Vector;
 
 import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
 import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
@@ -146,7 +145,7 @@ public void testPrecisionAtFiveRelevanceThreshold() {
 
     public void testCombine() {
         MeanReciprocalRank reciprocalRank = new MeanReciprocalRank();
-        Vector<EvalQueryQuality> partialResults = new Vector<>(3);
+        List<EvalQueryQuality> partialResults = new ArrayList<>(3);
         partialResults.add(new EvalQueryQuality("id1", 0.5));
         partialResults.add(new EvalQueryQuality("id2", 1.0));
         partialResults.add(new EvalQueryQuality("id3", 0.75));
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java
index 3efff57920b84..c65ad76fdf9af 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java
@@ -38,7 +38,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Vector;
 
 import static
org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; @@ -163,7 +162,7 @@ public void testParseFromXContent() throws IOException { public void testCombine() { PrecisionAtK metric = new PrecisionAtK(); - Vector partialResults = new Vector<>(3); + List partialResults = new ArrayList<>(3); partialResults.add(new EvalQueryQuality("a", 0.1)); partialResults.add(new EvalQueryQuality("b", 0.2)); partialResults.add(new EvalQueryQuality("c", 0.6)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java index 2aff0d7a5c501..3cc0901c81e21 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -61,7 +61,7 @@ private BulkByScrollParallelizationHelper() {} static > void startSlicedAction( Request request, BulkByScrollTask task, - Action action, + Action action, ActionListener listener, Client client, DiscoveryNode node, @@ -85,7 +85,7 @@ static > void startSlicedAc private static > void sliceConditionally( Request request, BulkByScrollTask task, - Action action, + Action action, ActionListener listener, Client client, DiscoveryNode node, @@ -118,7 +118,7 @@ private static int countSlicesBasedOnShards(ClusterSearchShardsResponse response private static > void sendSubRequests( Client client, - Action action, + Action action, String localNodeId, BulkByScrollTask task, Request request, diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java index ffbcbbf1e37e7..ff0803c77425f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java @@ -21,9 +21,8 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; -public class RethrottleAction extends Action { +public class RethrottleAction extends Action { public static final RethrottleAction INSTANCE = new RethrottleAction(); public static final String NAME = "cluster:admin/reindex/rethrottle"; @@ -31,11 +30,6 @@ private RethrottleAction() { super(NAME); } - @Override - public RethrottleRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new RethrottleRequestBuilder(client, this); - } - @Override public ListTasksResponse newResponse() { return new ListTasksResponse(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java index d5cc3d9c5d388..b70389b5c9f3e 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java @@ -29,7 +29,7 @@ */ public class RethrottleRequestBuilder extends TasksRequestBuilder { public RethrottleRequestBuilder(ElasticsearchClient client, - Action action) { + Action action) { super(client, action, new RethrottleRequest()); } diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 1f99f062d25af..8743ef255ba06 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -27,10 +27,12 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder; import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.index.reindex.ReindexRequestBuilder; import org.elasticsearch.index.reindex.RethrottleAction; +import org.elasticsearch.index.reindex.RethrottleRequestBuilder; import org.elasticsearch.index.reindex.UpdateByQueryAction; import org.elasticsearch.index.reindex.UpdateByQueryRequestBuilder; import org.elasticsearch.script.Script; @@ -47,7 +49,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase { public void reindex() { Client client = client(); // tag::reindex1 - BulkByScrollResponse response = ReindexAction.INSTANCE.newRequestBuilder(client) + BulkByScrollResponse response = new ReindexRequestBuilder(client, ReindexAction.INSTANCE) .destination("target_index") .filter(QueryBuilders.matchQuery("category", "xzy")) // <1> .get(); @@ -58,14 +60,14 @@ public void updateByQuery() { Client client = client(); { // tag::update-by-query - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index").abortOnVersionConflict(false); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query } { // tag::update-by-query-filter - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index") .filter(QueryBuilders.termQuery("level", "awesome")) .size(1000) @@ -75,7 +77,7 @@ public void updateByQuery() { } { // tag::update-by-query-size - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index") .source().setSize(500); BulkByScrollResponse response = updateByQuery.get(); @@ -83,7 +85,7 @@ public void updateByQuery() { } { // tag::update-by-query-sort - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index").size(100) .source().addSort("cat", SortOrder.DESC); BulkByScrollResponse response = updateByQuery.get(); @@ -91,7 +93,7 @@ public void updateByQuery() { } { // tag::update-by-query-script - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, 
UpdateByQueryAction.INSTANCE); updateByQuery.source("source_index") .script(new Script( ScriptType.INLINE, @@ -108,21 +110,21 @@ public void updateByQuery() { } { // tag::update-by-query-multi-index - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source("foo", "bar").source().setTypes("a", "b"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-multi-index } { // tag::update-by-query-routing - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.source().setRouting("cat"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-routing } { // tag::update-by-query-pipeline - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE); updateByQuery.setPipeline("hurray"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-pipeline @@ -156,7 +158,7 @@ public void updateByQuery() { { TaskId taskId = null; // tag::update-by-query-rethrottle - RethrottleAction.INSTANCE.newRequestBuilder(client) + new RethrottleRequestBuilder(client, RethrottleAction.INSTANCE) .setTaskId(taskId) .setRequestsPerSecond(2.0f) .get(); @@ -167,7 +169,7 @@ public void updateByQuery() { public void deleteByQuery() { Client client = client(); // tag::delete-by-query-sync - BulkByScrollResponse response = DeleteByQueryAction.INSTANCE.newRequestBuilder(client) + BulkByScrollResponse response = new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) .filter(QueryBuilders.matchQuery("gender", "male")) // <1> .source("persons") // <2> .get(); // <3> @@ -175,7 +177,7 @@ public void deleteByQuery() { // end::delete-by-query-sync // tag::delete-by-query-async - DeleteByQueryAction.INSTANCE.newRequestBuilder(client) + new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) .filter(QueryBuilders.matchQuery("gender", "male")) // <1> .source("persons") // <2> .execute(new ActionListener() { // <3> diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 17345f5c85b65..727710e8b6bdd 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -744,8 +744,8 @@ private class MyMockClient extends FilterClient { @Override @SuppressWarnings("unchecked") protected > void doExecute( - Action action, Request request, ActionListener listener) { + RequestBuilder extends ActionRequestBuilder> void doExecute( + Action action, Request request, ActionListener listener) { if (false == expectedHeaders.equals(threadPool().getThreadContext().getHeaders())) { listener.onFailure( new RuntimeException("Expected " + expectedHeaders + " but got " + threadPool().getThreadContext().getHeaders())); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index f829c8f22d73c..72ba651dff9ae 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -109,13 +109,13 @@ private RemoteInfo newRemoteInfo(String username, String password, Map request.get()); assertEquals(RestStatus.BAD_REQUEST, e.status()); @@ -123,7 +123,7 @@ public void testReindexSendsHeaders() throws Exception { } public void testReindexWithoutAuthenticationWhenRequired() throws Exception { - ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") + ReindexRequestBuilder request = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("source").destination("dest") .setRemoteInfo(newRemoteInfo(null, null, emptyMap())); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get()); assertEquals(RestStatus.UNAUTHORIZED, e.status()); @@ -132,7 +132,7 @@ public void testReindexWithoutAuthenticationWhenRequired() throws Exception { } public void testReindexWithBadAuthentication() throws Exception { - ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") + ReindexRequestBuilder request = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("source").destination("dest") .setRemoteInfo(newRemoteInfo("junk", "auth", emptyMap())); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get()); assertThat(e.getMessage(), containsString("\"reason\":\"Bad Authorization\"")); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java index 54854afb35ea4..01b5539a23c48 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexTestCase.java @@ -47,19 +47,19 @@ protected Collection> transportClientPlugins() { } protected ReindexRequestBuilder reindex() { - return ReindexAction.INSTANCE.newRequestBuilder(client()); + return new ReindexRequestBuilder(client(), ReindexAction.INSTANCE); } protected UpdateByQueryRequestBuilder updateByQuery() { - return UpdateByQueryAction.INSTANCE.newRequestBuilder(client()); + return new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE); } protected DeleteByQueryRequestBuilder deleteByQuery() { - return DeleteByQueryAction.INSTANCE.newRequestBuilder(client()); + return new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE); } protected RethrottleRequestBuilder rethrottle() { - return RethrottleAction.INSTANCE.newRequestBuilder(client()); + return new RethrottleRequestBuilder(client(), RethrottleAction.INSTANCE); } public static BulkIndexByScrollResponseMatcher matcher() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index aea720aeb21e2..4fe3d1a3a6eff 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -106,7 +106,7 @@ final Settings nodeSettings() { public void 
testReindex() throws Exception { testCase( ReindexAction.NAME, - client -> ReindexAction.INSTANCE.newRequestBuilder(client).source("source").destination("dest"), + client -> new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source("source").destination("dest"), matcher().created(DOC_COUNT)); } @@ -127,7 +127,7 @@ public void testReindexFromRemote() throws Exception { TransportAddress address = masterNode.getHttp().getAddress().publishAddress(); RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT); - ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client).source("source").destination("dest") + ReindexRequestBuilder request = new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source("source").destination("dest") .setRemoteInfo(remote); return request; }; @@ -135,12 +135,12 @@ public void testReindexFromRemote() throws Exception { } public void testUpdateByQuery() throws Exception { - testCase(UpdateByQueryAction.NAME, client -> UpdateByQueryAction.INSTANCE.newRequestBuilder(client).source("source"), + testCase(UpdateByQueryAction.NAME, client -> new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE).source("source"), matcher().updated(DOC_COUNT)); } public void testDeleteByQuery() throws Exception { - testCase(DeleteByQueryAction.NAME, client -> DeleteByQueryAction.INSTANCE.newRequestBuilder(client).source("source") + testCase(DeleteByQueryAction.NAME, client -> new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE).source("source") .filter(QueryBuilders.matchAllQuery()), matcher().deleted(DOC_COUNT)); } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml index 70e78f7e36b37..89135449d6969 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml @@ -17,7 +17,7 @@ indices.refresh: {} - do: - catch: request + catch: bad_request reindex: body: source: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/85_scripting.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/85_scripting.yml index 617a46dfa66b5..901f24f022cba 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/85_scripting.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/85_scripting.yml @@ -446,7 +446,7 @@ indices.refresh: {} - do: - catch: request + catch: bad_request reindex: refresh: true body: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml index 17f422453ce18..e7f3a146480ff 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml @@ -17,7 +17,7 @@ indices.refresh: {} - do: - catch: request + catch: bad_request update_by_query: index: source body: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/80_scripting.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/80_scripting.yml index 8ed94347923d1..1a3880f3d15c1 
100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/80_scripting.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/80_scripting.yml @@ -434,7 +434,7 @@ indices.refresh: {} - do: - catch: request + catch: bad_request update_by_query: index: twitter refresh: true diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 45e889797bde4..31c1214f03428 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -56,6 +56,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpStats; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 96b436ce7de43..5b22409b92da0 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -273,7 +273,7 @@ public void dispatchBadRequest(final RestRequest request, try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { transport.start(); - final TransportAddress remoteAddress = randomFrom(transport.boundAddress.boundAddresses()); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); try (Netty4HttpClient client = new Netty4HttpClient()) { final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); @@ -352,7 +352,7 @@ public void dispatchBadRequest(final RestRequest request, try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { transport.start(); - final TransportAddress remoteAddress = randomFrom(transport.boundAddress.boundAddresses()); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); AtomicBoolean channelClosed = new AtomicBoolean(false); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..7f3d3b5ccf63c --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +844e2b76f4bc6e646e1c3257d668ac598e03f36a \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index 781b814c99e45..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 
+++ /dev/null @@ -1 +0,0 @@ -452c9a9f86b79b9b3eaa7d6aa782e189d5bcfe8f \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index a4502a953dbe0..c4c44222f470e 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -135,14 +135,6 @@ public Query existsQuery(QueryShardContext context) { } } - @Override - public Query nullValueQuery() { - if (nullValue() == null) { - return null; - } - return termQuery(nullValue(), null); - } - @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { failIfNoDocValues(); diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml index 521d8f0714070..c9ff2b2fb6463 100644 --- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml +++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml @@ -16,9 +16,11 @@ body: filter: [icu_normalizer] text: Foo Bar Ruß - tokenizer: keyword - - length: { tokens: 1 } - - match: { tokens.0.token: foo bar russ } + tokenizer: standard + - length: { tokens: 3 } + - match: { tokens.0.token: foo} + - match: { tokens.1.token: bar } + - match: { tokens.2.token: russ } --- "Normalization charfilter": - do: @@ -26,9 +28,11 @@ body: char_filter: [icu_normalizer] text: Foo Bar Ruß - tokenizer: keyword - - length: { tokens: 1 } - - match: { tokens.0.token: foo bar russ } + tokenizer: standard + - length: { tokens: 3 } + - match: { tokens.0.token: foo } + - match: { tokens.1.token: bar } + - match: { tokens.2.token: russ } --- "Folding filter": - do: @@ -36,9 +40,11 @@ body: filter: [icu_folding] text: Foo Bar résumé - tokenizer: keyword - - length: { tokens: 1 } - - match: { tokens.0.token: foo bar resume } + tokenizer: standard + - length: { tokens: 3 } + - match: { tokens.0.token: foo } + - match: { tokens.1.token: bar } + - match: { tokens.2.token: resume } --- "Normalization with a UnicodeSet Filter": - do: @@ -64,25 +70,34 @@ index: test body: char_filter: ["charfilter_icu_normalizer"] - tokenizer: keyword + tokenizer: standard text: charfilter Föo Bâr Ruß - - length: { tokens: 1 } - - match: { tokens.0.token: charfilter föo bâr ruß } + - length: { tokens: 4 } + - match: { tokens.0.token: charfilter } + - match: { tokens.1.token: föo } + - match: { tokens.2.token: bâr } + - match: { tokens.3.token: ruß } - do: indices.analyze: index: test body: - tokenizer: keyword + tokenizer: standard filter: ["tokenfilter_icu_normalizer"] text: tokenfilter Föo Bâr Ruß - - length: { tokens: 1 } - - match: { tokens.0.token: tokenfilter föo Bâr ruß } + - length: { tokens: 4 } + - match: { tokens.0.token: tokenfilter } + - match: { tokens.1.token: föo } + - match: { tokens.2.token: Bâr } + - match: { tokens.3.token: ruß } - do: indices.analyze: index: test body: - tokenizer: keyword + tokenizer: standard filter: ["tokenfilter_icu_folding"] text: icufolding Föo Bâr Ruß - - length: { tokens: 1 } - - match: { tokens.0.token: icufolding foo bâr russ } + - length: { tokens: 4 } + - match: { tokens.0.token: icufolding } + - match: { tokens.1.token: foo } + - match: { tokens.2.token: 
bâr } + - match: { tokens.3.token: russ } diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..65423fff2a441 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +2f2bd2d67c7952e4ae14ab3f742824a45d0d1719 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index baba08978587f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48c76a922bdfc7f50b1b6fe22e9456c555f3f990 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..04fa62ce64a1d --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +46ad7ebcfcdbdb60dd54aae4d720356a7a51c7c0 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index da19e1c3857a5..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4db5777df468b0867ff6539c9ab687e0ed6cab41 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..55bc8869196e0 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +548e9f2b4d4a985dc174b2eee4007c0bd5642e68 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index 148b5425d64b1..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e09e6b011ab2b1a0e3e0e1df2ab2a91dca8ba23 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..be66854321699 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +b90e66f4104f0234cfef335762f65a6fed695231 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index bce84d16a9a3d..0000000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ceefa0f9789ab9ea5c8ab9f67ed7a601a3ae6aa9 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..b77acdc34f31c --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +929a4eb52b11f6d3f0df9c8eba014f5ee2464c67 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index 762c56f77001f..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b013adc183e52a74795ad3d3032f4d0f9db30b73 \ No newline at end of file diff --git a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yml b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yml index 1941126c64fb8..3400a7f9bdf1a 100644 --- a/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yml +++ b/plugins/analysis-stempel/src/test/resources/rest-api-spec/test/analysis_stempel/10_basic.yml @@ -5,7 +5,7 @@ indices.analyze: body: text: studenci - tokenizer: keyword + tokenizer: standard filter: [polish_stem] - length: { tokens: 1 } - match: { tokens.0.token: student } diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-1cbadda4d3.jar.sha1 new file mode 100644 index 0000000000000..cce4b6ff18df5 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-1cbadda4d3.jar.sha1 @@ -0,0 +1 @@ +0e6575a411b65cd95e0e54f04d3da278b68be521 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 deleted file mode 100644 index 7631bea25691f..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95300f29418f60e57e022d934d3462be9e1e2225 \ No newline at end of file
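The analysis test changes above (ICU and stempel alike) all make the same move: the fixtures used to run token filters on top of the keyword tokenizer, which emits the whole input as a single token, and now use the standard tokenizer, which splits on word boundaries, so each assertion checks one token per word. A rough sketch of the behaviour the updated ICU folding fixture exercises, via the low-level REST client's `_analyze` API; the `AnalyzeExample` class and method name are illustrative, not part of the test suite:

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

final class AnalyzeExample {

    // With the standard tokenizer, "Foo Bar résumé" folds to the three tokens
    // [foo, bar, resume]; the old keyword tokenizer returned the single token
    // "foo bar resume", which is why the expected token counts changed from 1 to 3.
    static Response analyzeWithIcuFolding(RestClient client) throws Exception {
        Request request = new Request("GET", "/_analyze");
        request.setJsonEntity(
            "{\"tokenizer\": \"standard\", \"filter\": [\"icu_folding\"], \"text\": \"Foo Bar résumé\"}");
        return client.performRequest(request);
    }

    private AnalyzeExample() {}
}
```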
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java index f21dbdfd269f4..6f4f8cfea9609 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java @@ -35,6 +35,7 @@ import com.microsoft.azure.storage.blob.ListBlobItem; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import
org.elasticsearch.common.collect.MapBuilder; @@ -45,6 +46,7 @@ import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.Map; @@ -52,66 +54,59 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { final Map storageSettings; - - final Map clients = new HashMap<>(); + final Map clients; public AzureStorageServiceImpl(Settings settings, Map storageSettings) { super(settings); - - this.storageSettings = storageSettings; - if (storageSettings.isEmpty()) { // If someone did not register any settings, they basically can't use the plugin throw new IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration."); } - - logger.debug("starting azure storage client instance"); - - // We register all regular azure clients - for (Map.Entry azureStorageSettingsEntry : this.storageSettings.entrySet()) { - logger.debug("registering regular client for account [{}]", azureStorageSettingsEntry.getKey()); - createClient(azureStorageSettingsEntry.getValue()); - } + this.storageSettings = storageSettings; + this.clients = createClients(storageSettings); } - void createClient(AzureStorageSettings azureStorageSettings) { - try { - logger.trace("creating new Azure storage client using account [{}], key [{}], endpoint suffix [{}]", - azureStorageSettings.getAccount(), azureStorageSettings.getKey(), azureStorageSettings.getEndpointSuffix()); + private Map createClients(final Map storageSettings) { + final Map clients = new HashMap<>(); + for (Map.Entry azureStorageEntry : storageSettings.entrySet()) { + final String clientName = azureStorageEntry.getKey(); + final AzureStorageSettings clientSettings = azureStorageEntry.getValue(); + try { + logger.trace("creating new Azure storage client with name [{}]", clientName); + String storageConnectionString = + "DefaultEndpointsProtocol=https;" + + "AccountName=" + clientSettings.getAccount() + ";" + + "AccountKey=" + clientSettings.getKey(); + + final String endpointSuffix = clientSettings.getEndpointSuffix(); + if (Strings.hasLength(endpointSuffix)) { + storageConnectionString += ";EndpointSuffix=" + endpointSuffix; + } + // Retrieve storage account from connection-string. + CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); - String storageConnectionString = - "DefaultEndpointsProtocol=https;" - + "AccountName=" + azureStorageSettings.getAccount() + ";" - + "AccountKey=" + azureStorageSettings.getKey(); + // Create the blob client. + CloudBlobClient client = storageAccount.createCloudBlobClient(); - String endpointSuffix = azureStorageSettings.getEndpointSuffix(); - if (endpointSuffix != null && !endpointSuffix.isEmpty()) { - storageConnectionString += ";EndpointSuffix=" + endpointSuffix; + // Register the client + clients.put(clientSettings.getAccount(), client); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Can not create azure storage client [{}]", clientName), e); } - // Retrieve storage account from connection-string. - CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); - - // Create the blob client.
- CloudBlobClient client = storageAccount.createCloudBlobClient(); - - // Register the client - this.clients.put(azureStorageSettings.getAccount(), client); - } catch (Exception e) { - logger.error("can not create azure storage client: {}", e.getMessage()); } + return Collections.unmodifiableMap(clients); } CloudBlobClient getSelectedClient(String clientName, LocationMode mode) { logger.trace("selecting a client named [{}], mode [{}]", clientName, mode.name()); AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); if (azureStorageSettings == null) { - throw new IllegalArgumentException("Can not find named azure client [" + clientName + "]. Check your settings."); + throw new IllegalArgumentException("Unable to find client with name [" + clientName + "]"); } CloudBlobClient client = this.clients.get(azureStorageSettings.getAccount()); - if (client == null) { - throw new IllegalArgumentException("Can not find an azure client named [" + azureStorageSettings.getAccount() + "]"); + throw new IllegalArgumentException("No account defined for client with name [" + clientName + "]"); } // NOTE: for now, just set the location mode in case it is different;
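The AzureStorageServiceImpl change above replaces a mutable client map, filled in lazily by a separate createClient call, with a map built once in the constructor and published as an unmodifiable view: a missing or malformed client configuration now fails at construction time, and the finished service can be shared across threads without synchronization. A minimal sketch of the pattern follows; `ClientRegistry` and the plain `String` values are illustrative stand-ins for the real `AzureStorageSettings` and `CloudBlobClient` types, not part of the plugin:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class ClientRegistry {

    // account name -> connection string; unmodifiable once constructed
    private final Map<String, String> clients;

    ClientRegistry(Map<String, String> accountKeys) {
        if (accountKeys.isEmpty()) {
            // Fail fast at construction time, as the refactored service now does.
            throw new IllegalArgumentException("at least one client configuration is required");
        }
        Map<String, String> built = new HashMap<>();
        for (Map.Entry<String, String> entry : accountKeys.entrySet()) {
            // Mirrors the connection-string assembly in createClients().
            built.put(entry.getKey(), "DefaultEndpointsProtocol=https;"
                + "AccountName=" + entry.getKey() + ";"
                + "AccountKey=" + entry.getValue());
        }
        // Publish an immutable view: no client can be added or replaced later.
        this.clients = Collections.unmodifiableMap(built);
    }

    String getSelected(String name) {
        String client = clients.get(name);
        if (client == null) {
            throw new IllegalArgumentException("Unable to find client with name [" + name + "]");
        }
        return client;
    }
}
```

Keeping the map unmodifiable also means getSelectedClient-style lookups fail with a clear IllegalArgumentException rather than racing a half-initialized registry.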
secureSettings.setString("azure.client.azure2.key", "mykey2"); + secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2")); secureSettings.setString("azure.client.azure3.account", "myaccount3"); - secureSettings.setString("azure.client.azure3.key", "mykey3"); + secureSettings.setString("azure.client.azure3.key", encodeKey("mykey3")); Settings settings = Settings.builder().setSecureSettings(secureSettings) .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build(); @@ -88,9 +71,9 @@ public void testReadSecuredSettings() { public void testCreateClientWithEndpointSuffix() { MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); - secureSettings.setString("azure.client.azure1.key", Base64.encode("mykey1".getBytes(StandardCharsets.UTF_8))); + secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); secureSettings.setString("azure.client.azure2.account", "myaccount2"); - secureSettings.setString("azure.client.azure2.key", Base64.encode("mykey2".getBytes(StandardCharsets.UTF_8))); + secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2")); Settings settings = Settings.builder().setSecureSettings(secureSettings) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings)); @@ -103,7 +86,7 @@ public void testCreateClientWithEndpointSuffix() { public void testGetSelectedClientWithNoPrimaryAndSecondary() { try { - new AzureStorageServiceMockForSettings(Settings.EMPTY); + new AzureStorageServiceImpl(Settings.EMPTY, Collections.emptyMap()); fail("we should have raised an IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); @@ -111,11 +94,11 @@ public void testGetSelectedClientWithNoPrimaryAndSecondary() { } public void testGetSelectedClientNonExisting() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); + AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY); }); - assertThat(e.getMessage(), is("Can not find named azure client [azure4]. 
Check your settings.")); + assertThat(e.getMessage(), is("Unable to find client with name [azure4]")); } public void testGetSelectedClientDefaultTimeout() { @@ -123,7 +106,7 @@ public void testGetSelectedClientDefaultTimeout() { .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); + AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY); @@ -131,13 +114,13 @@ public void testGetSelectedClientDefaultTimeout() { } public void testGetSelectedClientNoTimeout() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); + AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings()); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(buildSettings()); + AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings()); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -149,7 +132,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { .put("azure.client.azure1.max_retries", 7) .build(); - AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMockForSettings(timeoutSettings); + AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings); CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -159,7 +142,7 @@ public void testNoProxy() { Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + AzureStorageServiceImpl mock = createAzureService(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); @@ -172,7 +155,7 @@ public void testProxyHttp() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + AzureStorageServiceImpl mock = createAzureService(settings); Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); @@ -192,7 +175,7 @@ public void testMultipleProxies() throws UnknownHostException { 
.put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + AzureStorageServiceImpl mock = createAzureService(settings); Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -211,7 +194,7 @@ public void testProxySocks() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - AzureStorageServiceMockForSettings mock = new AzureStorageServiceMockForSettings(settings); + AzureStorageServiceImpl mock = createAzureService(settings); Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); @@ -227,7 +210,7 @@ public void testProxyNoHost() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -238,7 +221,7 @@ public void testProxyNoPort() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -249,7 +232,7 @@ public void testProxyNoType() { .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } @@ -261,26 +244,10 @@ public void testProxyWrongHost() { .put("azure.client.azure1.proxy.port", 8080) .build(); - SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceMockForSettings(settings)); + SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } - /** - * This internal class just overload createClient method which is called by AzureStorageServiceImpl.doStart() - */ - class AzureStorageServiceMockForSettings extends AzureStorageServiceImpl { - AzureStorageServiceMockForSettings(Settings settings) { - super(settings, AzureStorageSettings.load(settings)); - } - - // We fake the client here - @Override - void createClient(AzureStorageSettings azureStorageSettings) { - this.clients.put(azureStorageSettings.getAccount(), - new CloudBlobClient(URI.create("https://" + azureStorageSettings.getAccount()))); - } - } - public void testBlobNameFromUri() throws URISyntaxException { String name = blobNameFromUri(new URI("https://myservice.azure.net/container/path/to/myfile")); assertThat(name, is("path/to/myfile")); @@ -291,4 +258,27 @@ public void testBlobNameFromUri() throws URISyntaxException { name = blobNameFromUri(new 
URI("https://127.0.0.1/container/path/to/myfile")); assertThat(name, is("path/to/myfile")); } + + private static MockSecureSettings buildSecureSettings() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.azure1.account", "myaccount1"); + secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); + secureSettings.setString("azure.client.azure2.account", "myaccount2"); + secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2")); + secureSettings.setString("azure.client.azure3.account", "myaccount3"); + secureSettings.setString("azure.client.azure3.key", encodeKey("mykey3")); + return secureSettings; + } + + private static Settings buildSettings() { + return Settings.builder().setSecureSettings(buildSecureSettings()).build(); + } + + private static AzureStorageServiceImpl createAzureService(final Settings settings) { + return new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings)); + } + + private static String encodeKey(final String value) { + return Base64.encode(value.getBytes(StandardCharsets.UTF_8)); + } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index 825a023bd51bc..de5c166de3f5b 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -42,7 +42,7 @@ import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; -import org.elasticsearch.http.netty4.AbstractHttpServerTransport; +import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; import org.elasticsearch.nio.BytesChannelContext; diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 99132f0c89d5b..81724bd72ab0a 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -221,8 +222,10 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; Request request = new Request("GET", "/" + queryIndex + "/_search"); - request.addHeader(CUSTOM_HEADER, randomHeaderValue); - request.addHeader(IRRELEVANT_HEADER, randomHeaderValue); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader(CUSTOM_HEADER, randomHeaderValue); + options.addHeader(IRRELEVANT_HEADER, randomHeaderValue); + request.setOptions(options); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); 
diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index 2d139e7955ea9..a1dd978df1775 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.http; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import java.io.IOException; @@ -32,8 +33,10 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("GET", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); - request.addHeader("Origin", corsValue); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("User-Agent", "Mozilla Bar"); + options.addHeader("Origin", corsValue); + request.setOptions(options); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index e79e80315501b..ad10ad80e4bf0 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.http; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; @@ -55,16 +56,20 @@ public void testThatRegularExpressionWorksOnMatch() throws IOException { { String corsValue = "http://localhost:9200"; Request request = new Request("GET", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); - request.addHeader("Origin", corsValue); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("User-Agent", "Mozilla Bar"); + options.addHeader("Origin", corsValue); + request.setOptions(options); Response response = getRestClient().performRequest(request); assertResponseWithOriginHeader(response, corsValue); } { String corsValue = "https://localhost:9201"; Request request = new Request("GET", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); - request.addHeader("Origin", corsValue); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("User-Agent", "Mozilla Bar"); + options.addHeader("Origin", corsValue); + request.setOptions(options); Response response = getRestClient().performRequest(request); assertResponseWithOriginHeader(response, corsValue); assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); @@ -73,8 +78,10 @@ public void testThatRegularExpressionWorksOnMatch() throws IOException { public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { Request request = new Request("GET", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); - request.addHeader("Origin", "http://evil-host:9200"); + RequestOptions.Builder options = request.getOptions().toBuilder(); 
+ options.addHeader("User-Agent", "Mozilla Bar"); + options.addHeader("Origin", "http://evil-host:9200"); + request.setOptions(options); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -88,7 +95,9 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOExcep public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { Request request = new Request("GET", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("User-Agent", "Mozilla Bar"); + request.setOptions(options); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -103,9 +112,11 @@ public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("OPTIONS", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); - request.addHeader("Origin", corsValue); - request.addHeader("Access-Control-Request-Method", "GET"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("User-Agent", "Mozilla Bar"); + options.addHeader("Origin", corsValue); + options.addHeader("Access-Control-Request-Method", "GET"); + request.setOptions(options); Response response = getRestClient().performRequest(request); assertResponseWithOriginHeader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); @@ -114,9 +125,11 @@ public void testThatPreFlightRequestWorksOnMatch() throws IOException { public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { String corsValue = "http://evil-host:9200"; Request request = new Request("OPTIONS", "/"); - request.addHeader("User-Agent", "Mozilla Bar"); - request.addHeader("Origin", corsValue); - request.addHeader("Access-Control-Request-Method", "GET"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("User-Agent", "Mozilla Bar"); + options.addHeader("Origin", corsValue); + options.addHeader("Access-Control-Request-Method", "GET"); + request.setOptions(options); try { getRestClient().performRequest(request); fail("request should have failed"); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index a9a0a0c7ed945..b287b49527ab1 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -20,6 +20,7 @@ import org.apache.http.HttpHeaders; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.test.rest.ESRestTestCase; @@ -38,7 +39,9 @@ public class HttpCompressionIT extends ESRestTestCase { public void testCompressesResponseIfRequested() throws IOException { Request request = new Request("GET", "/"); - request.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING); + request.setOptions(options); Response response = client().performRequest(request); 
assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index 976ba3131151f..d3707031f0e35 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -21,6 +21,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -46,7 +47,9 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); - request.addHeader("Accept", accept); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Accept", accept); + request.setOptions(options); final ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index ac2503f2c525c..d73566c8038c7 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.http; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.plugins.Plugin; @@ -61,7 +62,9 @@ public void testThatSettingHeadersWorks() throws IOException { } Request request = new Request("GET", "/_protected"); - request.addHeader("Secret", "password"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader("Secret", "password"); + request.setOptions(options); Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml index 0e54ff0b7ad59..6bc6219bfe59d 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml @@ -332,7 +332,7 @@ wait_for_status: green - do: - catch: request + catch: bad_request ingest.put_pipeline: id: "my_pipeline_1" body: > @@ -348,5 +348,5 @@ ] } - match: { error.header.processor_type: "set" } - - match: { error.type: "general_script_exception" } - - match: { error.reason: "Failed to compile inline script [{{#join}}{{/join}}] using lang [mustache]" } + - match: { error.type: "script_exception" } + - match: { error.reason: "Mustache function [join] must contain one and only one identifier" }
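The two ingest yml changes here tighten `catch: request`, which matches any 4xx or 5xx response, to `catch: bad_request`, which asserts HTTP 400 specifically, now that template compilation failures surface as `script_exception` at put-pipeline time. A hedged sketch of the same expectation through the low-level REST client; the request body is abbreviated and illustrative, not the exact fixture:

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

final class BadRequestExample {

    // A pipeline whose template fails to compile should now come back as
    // HTTP 400 exactly, which is what `catch: bad_request` asserts.
    static void putInvalidPipeline(RestClient client) throws Exception {
        Request request = new Request("PUT", "/_ingest/pipeline/my_pipeline_1");
        // Abbreviated, illustrative body; the real test supplies a fuller
        // document around the bad {{#join}}{{/join}} template.
        request.setJsonEntity(
            "{\"processors\": [{\"set\": {\"field\": \"f\", \"value\": \"{{#join}}{{/join}}\"}}]}");
        try {
            client.performRequest(request);
            throw new AssertionError("expected a 400 response");
        } catch (ResponseException e) {
            int status = e.getResponse().getStatusLine().getStatusCode();
            if (status != 400) {
                throw new AssertionError("expected 400 but got " + status);
            }
        }
    }

    private BadRequestExample() {}
}
```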
"Mustache function [join] must contain one and only one identifier" } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml index 8c6a94b4a5c49..1c027adcc8071 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/50_script_processor_using_painless.yml @@ -89,7 +89,7 @@ --- "Test script processor with syntax error in inline script": - do: - catch: request + catch: bad_request ingest.put_pipeline: id: "my_pipeline" body: > diff --git a/qa/vagrant/README.md b/qa/vagrant/README.md index ce253a2e3e002..801c3e52673a3 100644 --- a/qa/vagrant/README.md +++ b/qa/vagrant/README.md @@ -82,38 +82,26 @@ In general it's probably best to avoid running external commands when a good Java alternative exists. For example most filesystem operations can be done with the java.nio.file APIs. For those that aren't, use an instance of [Shell](src/main/java/org/elasticsearch/packaging/util/Shell.java) -Despite the name, commands run with this class are not run in a shell, and any -familiar features of shells like variables or expansion won't work. - -If you do need the shell, you must explicitly invoke the shell's command. For -example to run a command with Bash, use the `bash -c command` syntax. Note that -the entire script must be in a single string argument +This class runs scripts in either bash with the `bash -c