diff --git a/.buildkite/check-es-serverless.yml b/.buildkite/check-es-serverless.yml new file mode 100644 index 0000000000000..1659b1413f64b --- /dev/null +++ b/.buildkite/check-es-serverless.yml @@ -0,0 +1,9 @@ +steps: + - trigger: elasticsearch-serverless-update-submodule + label: ":elasticsearch: Check elasticsearch changes against serverless" + build: + message: "Validate latest elasticsearch changes" + env: + ELASTICSEARCH_SUBMODULE_COMMIT: "${BUILDKITE_COMMIT}" + UPDATE_SUBMODULE: "false" + diff --git a/.buildkite/update-es-serverless.yml b/.buildkite/update-es-serverless.yml index 5f0bfdd9afa90..b9e01581df331 100644 --- a/.buildkite/update-es-serverless.yml +++ b/.buildkite/update-es-serverless.yml @@ -1,6 +1,6 @@ steps: - - trigger: elasticsearch-serverless - label: ":elasticsearch: Run serverless tests" + - trigger: elasticsearch-serverless-update-submodule + label: ":elasticsearch: Update elasticsearch submodule in serverless" build: message: "Elasticsearch submodule update build" env: diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 5462e2b4b2a9b..989cf2241e3c6 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -87,6 +87,6 @@ BWC_VERSION: - "8.6.2" - "8.7.0" - "8.7.1" - - "8.7.2" - "8.8.0" + - "8.8.1" - "8.9.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 088f3e184d577..79e6dca4d6c38 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - "7.17.11" - - "8.7.2" - - "8.8.0" + - "8.8.1" - "8.9.0" diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy index bb56e93d35cec..b365624b5749a 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy @@ -97,7 +97,6 @@ class ThirdPartyAuditTaskFuncTest extends AbstractGradleInternalPluginFuncTest { def output = normalized(result.getOutput()) assertOutputContains(output, """\ - Forbidden APIs output: DEBUG: Classpath: [file:./build/precommit/thirdPartyAudit/thirdPartyAudit/] DEBUG: Detected Java 9 or later with module system. ERROR: Forbidden class/interface use: java.io.File [non-public internal runtime class] @@ -135,7 +134,6 @@ class ThirdPartyAuditTaskFuncTest extends AbstractGradleInternalPluginFuncTest { def output = normalized(result.getOutput()) assertOutputContains(output, """\ - Forbidden APIs output: DEBUG: Classpath: [file:./build/precommit/thirdPartyAudit/thirdPartyAudit/] DEBUG: Detected Java 9 or later with module system. DEBUG: Class 'org.apache.logging.log4j.LogManager' cannot be loaded (while looking up details about referenced class 'org.apache.logging.log4j.LogManager'). 
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java index 9af8096d07e6c..e3193f7aea5d0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java @@ -8,6 +8,9 @@ package org.elasticsearch.gradle.internal.docker; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + /** * The methods in this class take a shell command and wrap it in retry logic, so that our * Docker builds can be more robust in the face of transient errors e.g. network issues. @@ -17,8 +20,7 @@ static String loop(String name, String command) { return loop(name, command, 4, "exit"); } - // TODO: Re-enable when we sort out https://github.com/elastic/elasticsearch/issues/96207 - /*static String loop(String name, String command, int indentSize, String exitKeyword) { + static String loop(String name, String command, int indentSize, String exitKeyword) { String indent = " ".repeat(indentSize); // bash understands the `{1..10}` syntax, but other shells don't e.g. the default in Alpine Linux. @@ -35,9 +37,5 @@ static String loop(String name, String command) { // We need to escape all newlines so that the build process doesn't run all lines onto a single line return commandWithRetry.toString().replaceAll(" *\n", " \\\\\n"); - }*/ - - static String loop(String name, String command, int indentSize, String exitKeyword) { - return command.replaceAll(" *\n", " \\\\\n"); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java index 3bc6697930198..72c08712a1fd9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesPrecommitPlugin.java @@ -31,7 +31,10 @@ public TaskProvider<? extends Task> createTask(Project project) { Configuration compileOnly = project.getConfigurations() .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME); t.setDependencies( - runtimeClasspath.fileCollection(dependency -> dependency instanceof ProjectDependency == false).minus(compileOnly) + runtimeClasspath.fileCollection( + dependency -> dependency instanceof ProjectDependency == false + && dependency.getGroup().startsWith("org.elasticsearch") == false + ).minus(compileOnly) ); }); return dependencyLicenses; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 2293fd4342311..8f357497c9008 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -11,6 +11,8 @@ import org.apache.commons.io.output.NullOutputStream; import org.elasticsearch.gradle.OS; +import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.DefaultTask; import org.gradle.api.JavaVersion; import org.gradle.api.file.ArchiveOperations;
@@ -55,6 +57,8 @@ import javax.inject.Inject; +import static org.gradle.api.JavaVersion.VERSION_20; + @CacheableTask public abstract class ThirdPartyAuditTask extends DefaultTask { @@ -333,6 +337,10 @@ private String runForbiddenAPIsCli() throws IOException { spec.setExecutable(javaHome.get() + "/bin/java"); } spec.classpath(getForbiddenAPIsClasspath(), classpath); + // Enable explicitly for each release as appropriate. Just JDK 20 for now, and just the vector module. + if (isJava20()) { + spec.jvmArgs("--add-modules", "jdk.incubator.vector"); + } spec.jvmArgs("-Xmx1g"); spec.getMainClass().set("de.thetaphi.forbiddenapis.cli.CliMain"); spec.args("-f", getSignatureFile().getAbsolutePath(), "-d", getJarExpandDir(), "--debug", "--allowmissingclasses"); @@ -355,6 +363,18 @@ private String runForbiddenAPIsCli() throws IOException { return forbiddenApisOutput; } + /** Returns true iff the Java version is 20. */ + private boolean isJava20() { + if (BuildParams.getIsRuntimeJavaHomeSet()) { + if (VERSION_20.equals(BuildParams.getRuntimeJavaVersion())) { + return true; + } + } else if ("20".equals(VersionProperties.getBundledJdkMajorVersion())) { + return true; + } + return false; + } + private Set<String> runJdkJarHellCheck() throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); ExecResult execResult = execOperations.javaexec(spec -> { diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 5ae0dbf66fa20..d9c09013be730 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.9.0 -lucene = 9.6.0 +lucene = 9.7.0-snapshot-24df30cca69 bundled_jdk_vendor = openjdk bundled_jdk = 20.0.1+9@b4887098932d415489976708ad6d1a4b diff --git a/catalog-info.yaml b/catalog-info.yaml index 48b2c4e671208..38b7752a893d8 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -31,3 +31,34 @@ spec: daily promotion: branch: main cronline: '@daily' +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-elasticsearch-check-serverless-submodule + description: Validate elasticsearch changes against serverless + links: + - title: Pipeline + url: https://buildkite.com/elastic/elasticsearch-check-serverless-submodule +spec: + type: buildkite-pipeline + system: buildkite + owner: group:elasticsearch-team + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + description: ':elasticsearch: Validate elasticsearch changes against serverless' + name: elasticsearch / check serverless submodule + spec: + repository: elastic/elasticsearch + pipeline_file: .buildkite/check-es-serverless.yml + branch_configuration: main + teams: + elasticsearch-team: {} + everyone: + access_level: READ_ONLY + provider_settings: + build_pull_requests: false + publish_commit_status: false diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index ac47d2a37c182..466dc74d19e8e 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 9.6.0 -:lucene_version_path: 9_6_0 +:lucene_version: 9.7.0 +:lucene_version_path: 9_7_0 :jdk: 11.0.2 :jdk_major: 11 :build_type: tar diff --git a/docs/changelog/76511.yaml
b/docs/changelog/76511.yaml deleted file mode 100644 index ef98c99a03f95..0000000000000 --- a/docs/changelog/76511.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 76511 -summary: Add `reroute` processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/89256.yaml b/docs/changelog/89256.yaml deleted file mode 100644 index d0622f3775c21..0000000000000 --- a/docs/changelog/89256.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 89256 -summary: Add `keyed` parameter to filters agg, allowing the user to get non-keyed buckets of named filters agg -area: Aggregations -type: enhancement -issues: - - 83957 diff --git a/docs/changelog/91841.yaml b/docs/changelog/91841.yaml deleted file mode 100644 index 872dc9f610c5a..0000000000000 --- a/docs/changelog/91841.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91841 -summary: Use `storedFieldsSpec` to load stored fields for highlighting -area: Highlighting -type: enhancement -issues: [] diff --git a/docs/changelog/92021.yaml b/docs/changelog/92021.yaml deleted file mode 100644 index b0e5a48ca187d..0000000000000 --- a/docs/changelog/92021.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 92021 -summary: Improve master service batching queues -area: Cluster Coordination -type: enhancement -issues: - - 81626 diff --git a/docs/changelog/92588.yaml b/docs/changelog/92588.yaml deleted file mode 100644 index 0447207b398b7..0000000000000 --- a/docs/changelog/92588.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 92588 -summary: Failed tasks proactively cancel children tasks -area: Snapshot/Restore -type: enhancement -issues: - - 90353 diff --git a/docs/changelog/92646.yaml b/docs/changelog/92646.yaml deleted file mode 100644 index fbb840ce3a64e..0000000000000 --- a/docs/changelog/92646.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 92646 -summary: Introduce `DocumentParsingException` -area: Search -type: enhancement -issues: - - 85083 diff --git a/docs/changelog/92684.yaml b/docs/changelog/92684.yaml deleted file mode 100644 index 18896ce2a9c27..0000000000000 --- a/docs/changelog/92684.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 92684 -summary: Use `LogByteSizeMergePolicy` instead of `TieredMergePolicy` for time-based - data -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/92810.yaml b/docs/changelog/92810.yaml deleted file mode 100644 index 48714aae2f8e8..0000000000000 --- a/docs/changelog/92810.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 92810 -summary: Do not fail node if SAML HTTP metadata is unavailable -area: Authentication -type: enhancement -issues: - - 37608 diff --git a/docs/changelog/93133.yaml b/docs/changelog/93133.yaml deleted file mode 100644 index be6c2cc7be4f4..0000000000000 --- a/docs/changelog/93133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93133 -summary: Add request/response body logging to HTTP tracer -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/93239.yaml b/docs/changelog/93239.yaml deleted file mode 100644 index c0b45c7e8337f..0000000000000 --- a/docs/changelog/93239.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 93239 -summary: Support ignore malformed in boolean fields -area: Search -type: bug -issues: - - 89542 - diff --git a/docs/changelog/93371.yaml b/docs/changelog/93371.yaml deleted file mode 100644 index 6671f92bb6677..0000000000000 --- a/docs/changelog/93371.yaml +++ /dev/null @@ -1,31 +0,0 @@ -pr: 93371 -summary: "Encode using 40, 48 and 56 bits per value" -area: TSDB -type: feature -issues: [] -highlight: - title: "Encode using 40, 48 and 56 bits per value" - body: |- - We use the encoding as follows: - * for values 
taking [33, 40] bits per value, encode using 40 bits per value - * for values taking [41, 48] bits per value, encode using 48 bits per value - * for values taking [49, 56] bits per value, encode using 56 bits per value - - This is an improvement over the encoding used by ForUtils which does not - apply any compression for values taking more than 32 bits per value. - - Note that 40, 48 and 56 bits per value represent exact multiples of - bytes (5, 6 and 7 bytes per value). As a result, we always write values - using 3, 2 or 1 byte less than the 8 bytes required for a long value. - - Looking at the savings in stored bytes, for a block of 128 (long) values we - would normally store 128 x 8 bytes = 1024 bytes, while now we have the following: - * 40 bits per value: write 645 bytes instead of 1024, saving 379 bytes (37%) - * 48 bits per value: write 772 bytes instead of 1024, saving 252 bytes (24%) - * 56 bits per value: write 897 bytes instead of 1024, saving 127 bytes (12%) - - We also apply compression to gauge metrics under the assumption that - compressing values taking more than 32 bits per value works well for - floating point values, because of the way floating point values are - represented (IEEE 754 format). - notable: true diff --git a/docs/changelog/93396.yaml b/docs/changelog/93396.yaml deleted file mode 100644 index 9df80fdbf7ac1..0000000000000 --- a/docs/changelog/93396.yaml +++ /dev/null @@ -1,48 +0,0 @@ -pr: 93396 -summary: Add support for Reciprocal Rank Fusion to the search API -area: Ranking -type: feature -issues: [] -highlight: - title: Add support for Reciprocal Rank Fusion (RRF) to the search API - body: |- - This change adds reciprocal rank fusion (RRF) which follows the basic formula - for merging `1...n` sets of results sets together with `sum(1/(k+d))` where `k` - is a ranking constant and `d` is a document's scored position within a result set - from a query. The main advantage of ranking this way is the scores for the sets - of results do not have to be normalized relative to each other because RRF only - relies upon positions within each result set. - - The API for this change adds a `rank` top-level element to the search - endpoint. An example: - - [source,Java] - ---- - { - "query": { - "match": { - "product": { - "query": "brown shoes" - } - } - }, - "knn": { - "field": "product-vector", - "query_vector": [54, 10, -2], - "k": 20, - "num_candidates": 75 - }, - "rank": { - "rrf": { - "window_size": 100, - "rank_constant": 20 - } - } - } - ---- - - The above example will execute the search query and the knn search separately. - It will preserve separate result sets up to the point where the queries are - ranked on the coordinating node using RRF. 
- - notable: true diff --git a/docs/changelog/93524.yaml b/docs/changelog/93524.yaml deleted file mode 100644 index b19427e95f94c..0000000000000 --- a/docs/changelog/93524.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 93524 -summary: Reduce the likelihood of writing small segments due to an oversize translog -area: Engine -type: enhancement -issues: - - 75611 diff --git a/docs/changelog/93576.yaml b/docs/changelog/93576.yaml deleted file mode 100644 index db8640fa1e68e..0000000000000 --- a/docs/changelog/93576.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93576 -summary: Sort segments on timestamp in read only engine -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/93579.yaml b/docs/changelog/93579.yaml deleted file mode 100644 index 901f60b51d195..0000000000000 --- a/docs/changelog/93579.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93579 -summary: Use a combined field to index terms and doc values on keyword fields -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/93595.yaml b/docs/changelog/93595.yaml deleted file mode 100644 index 6fa2428dfab1f..0000000000000 --- a/docs/changelog/93595.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93595 -summary: Use mmap for temporary files -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/93600.yaml b/docs/changelog/93600.yaml deleted file mode 100644 index a1af86fc38f3c..0000000000000 --- a/docs/changelog/93600.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93600 -summary: New `TransportBroadcastUnpromotableAction` action -area: CRUD -type: feature -issues: [] diff --git a/docs/changelog/93607.yaml b/docs/changelog/93607.yaml deleted file mode 100644 index b8cf803303bd9..0000000000000 --- a/docs/changelog/93607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93607 -summary: Servlerless API protection with annotations -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/93632.yaml b/docs/changelog/93632.yaml deleted file mode 100644 index 9ec4562eb22a9..0000000000000 --- a/docs/changelog/93632.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 93632 -summary: Streamline AsyncShardFetch#getNumberOfInFlightFetches -area: Allocation -type: bug -issues: - - 93631 diff --git a/docs/changelog/93680.yaml b/docs/changelog/93680.yaml deleted file mode 100644 index dbe09454eea77..0000000000000 --- a/docs/changelog/93680.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93680 -summary: Add cluster stats re. 
snapshot activity -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/93694.yaml b/docs/changelog/93694.yaml deleted file mode 100644 index 5cc153785e0b9..0000000000000 --- a/docs/changelog/93694.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93694 -summary: Text Expansion Query -area: "Machine Learning" -type: feature -issues: [] diff --git a/docs/changelog/93695.yaml b/docs/changelog/93695.yaml deleted file mode 100644 index e0233bd51ac16..0000000000000 --- a/docs/changelog/93695.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93695 -summary: Fix `GetPipelineResponse` equality -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/93702.yaml b/docs/changelog/93702.yaml deleted file mode 100644 index ba68e4609f58a..0000000000000 --- a/docs/changelog/93702.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93702 -summary: Add origin of synonym rules to exception message -area: Analysis -type: enhancement -issues: [] diff --git a/docs/changelog/93704.yaml b/docs/changelog/93704.yaml deleted file mode 100644 index 129f47d84e840..0000000000000 --- a/docs/changelog/93704.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 93704 -summary: Merge two histograms usign the higher number of digits among all histograms -area: Aggregations -type: bug -issues: - - 92822 diff --git a/docs/changelog/93711.yaml b/docs/changelog/93711.yaml deleted file mode 100644 index 9aef0511cd984..0000000000000 --- a/docs/changelog/93711.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93711 -summary: Allow low level paging in `LeafDocLookup` -area: Infra/Scripting -type: bug -issues: [] diff --git a/docs/changelog/93726.yaml b/docs/changelog/93726.yaml deleted file mode 100644 index cf21784d41242..0000000000000 --- a/docs/changelog/93726.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93726 -summary: Finer control over authentication metadata serialization -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/93759.yaml b/docs/changelog/93759.yaml deleted file mode 100644 index 02b2f098897ec..0000000000000 --- a/docs/changelog/93759.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93759 -summary: Upgrading tika to 2.7.0 -area: Ingest Node -type: upgrade -issues: [] diff --git a/docs/changelog/93799.yaml b/docs/changelog/93799.yaml deleted file mode 100644 index f485b22b0258d..0000000000000 --- a/docs/changelog/93799.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93799 -summary: Avoid deserializing responses in proxy node -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/93802.yaml b/docs/changelog/93802.yaml deleted file mode 100644 index 1a3ec7837b360..0000000000000 --- a/docs/changelog/93802.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93802 -summary: "Fleet: Add new mappings for .fleet-actions signing" -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/93839.yaml b/docs/changelog/93839.yaml deleted file mode 100644 index f6176481affce..0000000000000 --- a/docs/changelog/93839.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 93839 -summary: Enable _terms_enum on version fields -area: Search -type: enhancement -issues: - - 83403 diff --git a/docs/changelog/93946.yaml b/docs/changelog/93946.yaml deleted file mode 100644 index 9dec150e12557..0000000000000 --- a/docs/changelog/93946.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93946 -summary: Support position `time_series_metric` on `geo_point` fields -area: "TSDB" -type: enhancement -issues: [] diff --git a/docs/changelog/93952.yaml b/docs/changelog/93952.yaml deleted file mode 100644 index 523f23f719f77..0000000000000 --- 
a/docs/changelog/93952.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93952 -summary: Improve module/plugin loading logging message. -area: Infra/Plugins -type: enhancement -issues: [93881] diff --git a/docs/changelog/93955.yaml b/docs/changelog/93955.yaml deleted file mode 100644 index 3f3dbadfcf68f..0000000000000 --- a/docs/changelog/93955.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93955 -summary: Add register analysis to repo analysis API -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/93960.yaml b/docs/changelog/93960.yaml deleted file mode 100644 index a4a9085f0deb1..0000000000000 --- a/docs/changelog/93960.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 93960 -summary: "[Profiling] Parallelize response handling" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/93990.yaml b/docs/changelog/93990.yaml deleted file mode 100644 index dd843905464f8..0000000000000 --- a/docs/changelog/93990.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 93990 -summary: Adding initial public and internal serverless scopes to data management rest - handlers -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/94000.yaml b/docs/changelog/94000.yaml deleted file mode 100644 index debbf2fd205c7..0000000000000 --- a/docs/changelog/94000.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94000 -summary: Introduce redirect method on IngestDocument -area: Ingest Node -type: enhancement -issues: - - 83653 diff --git a/docs/changelog/94012.yaml b/docs/changelog/94012.yaml deleted file mode 100644 index bfebcee132c07..0000000000000 --- a/docs/changelog/94012.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94012 -summary: Initial implementation for `DataLifecycleService` -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/94035.yaml b/docs/changelog/94035.yaml deleted file mode 100644 index fb669ba22a4e2..0000000000000 --- a/docs/changelog/94035.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94035 -summary: Adding initial public and internal serverless scopes to Search team REST - handlers -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/94065.yaml b/docs/changelog/94065.yaml deleted file mode 100644 index 3ffc9d70b0e79..0000000000000 --- a/docs/changelog/94065.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94065 -summary: Implicitly rollover data streams / aliases based on `max_primary_shard_docs` -area: ILM+SLM -type: enhancement -issues: - - 87246 diff --git a/docs/changelog/94066.yaml b/docs/changelog/94066.yaml deleted file mode 100644 index 4be624eb16c87..0000000000000 --- a/docs/changelog/94066.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 94066 -summary: Deprecate `cluster.routing.allocation.type` -area: Allocation -type: deprecation -issues: [] -deprecation: - title: Deprecate `cluster.routing.allocation.type` - area: Cluster and node setting - details: The `cluster.routing.allocation.type` setting is deprecated and will be removed in a future version. - impact: Discontinue use of the `cluster.routing.allocation.type` setting. 
diff --git a/docs/changelog/94080.yaml b/docs/changelog/94080.yaml deleted file mode 100644 index e259e3ead27a0..0000000000000 --- a/docs/changelog/94080.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94080 -summary: Fix `_terms_enum` display values -area: Search -type: bug -issues: - - 94041 diff --git a/docs/changelog/94116.yaml b/docs/changelog/94116.yaml deleted file mode 100644 index a9558b2e8fc73..0000000000000 --- a/docs/changelog/94116.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94116 -summary: Add to `HealthMetadata` information about `ShardLimits` -area: Health -type: feature -issues: [] diff --git a/docs/changelog/94121.yaml b/docs/changelog/94121.yaml deleted file mode 100644 index 740d2363133f3..0000000000000 --- a/docs/changelog/94121.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94121 -summary: Enable synthetic source for malformed booleans -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/94133.yaml b/docs/changelog/94133.yaml deleted file mode 100644 index 5f12617f3a2ba..0000000000000 --- a/docs/changelog/94133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94133 -summary: Porting watcher over to `BulkProcessor2` -area: Watcher -type: enhancement -issues: [] diff --git a/docs/changelog/94134.yaml b/docs/changelog/94134.yaml deleted file mode 100644 index c1e96550cab2a..0000000000000 --- a/docs/changelog/94134.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94134 -summary: Increase the merge factor to 32 for time-based data -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/94136.yaml b/docs/changelog/94136.yaml deleted file mode 100644 index 1aa77888a4d73..0000000000000 --- a/docs/changelog/94136.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94136 -summary: Add level parameter validation in REST layer -area: Infra/REST API -type: bug -issues: [93981] diff --git a/docs/changelog/94162.yaml b/docs/changelog/94162.yaml deleted file mode 100644 index 69048419cf961..0000000000000 --- a/docs/changelog/94162.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94162 -summary: Add `delete_destination_index` parameter to the `Delete Transform API` -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/94179.yaml b/docs/changelog/94179.yaml deleted file mode 100644 index 0a7d9b0225d12..0000000000000 --- a/docs/changelog/94179.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94179 -summary: Upgrade to Netty 4.1.89 -area: Network -type: upgrade -issues: [] diff --git a/docs/changelog/94195.yaml b/docs/changelog/94195.yaml deleted file mode 100644 index 383db7635d2e6..0000000000000 --- a/docs/changelog/94195.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94195 -summary: Use double wildcards for filtered excludes properly -area: Infra/Core -type: bug -issues: - - 92632 diff --git a/docs/changelog/94240.yaml b/docs/changelog/94240.yaml deleted file mode 100644 index bd55e70140d5c..0000000000000 --- a/docs/changelog/94240.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94240 -summary: "[DLM] Introduce default rollover cluster setting & expose it via APIs" -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/94249.yaml b/docs/changelog/94249.yaml deleted file mode 100644 index b2e732067e122..0000000000000 --- a/docs/changelog/94249.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94249 -summary: Ensure refresh to return the latest commit generation -area: Engine -type: bug -issues: [] diff --git a/docs/changelog/94322.yaml b/docs/changelog/94322.yaml deleted file mode 100644 index 418f2564abcdf..0000000000000 --- a/docs/changelog/94322.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94322 -summary: 
Enable `_terms_enum` on `ip` fields -area: Mapping -type: enhancement -issues: - - 89933 diff --git a/docs/changelog/94327.yaml b/docs/changelog/94327.yaml deleted file mode 100644 index 42c7adc620620..0000000000000 --- a/docs/changelog/94327.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94327 -summary: "[Profiling] Map stack frames more efficiently" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/94379.yaml b/docs/changelog/94379.yaml deleted file mode 100644 index 6c7cf01317530..0000000000000 --- a/docs/changelog/94379.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94379 -summary: "Avoid duplicate application of RoutingTable diff" -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/94381.yaml b/docs/changelog/94381.yaml deleted file mode 100644 index 89c4bb61a0ed2..0000000000000 --- a/docs/changelog/94381.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94381 -summary: Add Enterprise Search Module -area: Search -type: feature -issues: [] diff --git a/docs/changelog/94396.yaml b/docs/changelog/94396.yaml deleted file mode 100644 index 58cc074e895ab..0000000000000 --- a/docs/changelog/94396.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94396 -summary: Allow docvalues-only search on `geo_shape` -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/94417.yaml b/docs/changelog/94417.yaml deleted file mode 100644 index 2e547ebd7d7ab..0000000000000 --- a/docs/changelog/94417.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94417 -summary: "Avoid copying during iteration of all shards in routing table" -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/94418.yaml b/docs/changelog/94418.yaml deleted file mode 100644 index 6d2eda28f92ec..0000000000000 --- a/docs/changelog/94418.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94418 -summary: Support for store parameter in `geo_shape` field -area: Geo -type: enhancement -issues: - - 83655 diff --git a/docs/changelog/94420.yaml b/docs/changelog/94420.yaml deleted file mode 100644 index 70699c55f7295..0000000000000 --- a/docs/changelog/94420.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94420 -summary: Secondary credentials used with transforms should only require source and destination index privileges, not transform privileges -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/94447.yaml b/docs/changelog/94447.yaml deleted file mode 100644 index 2b15246b0e279..0000000000000 --- a/docs/changelog/94447.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94447 -summary: Remove rollover cluster setting validator -area: DLM -type: bug -issues: [] diff --git a/docs/changelog/94494.yaml b/docs/changelog/94494.yaml deleted file mode 100644 index d2be702684a1e..0000000000000 --- a/docs/changelog/94494.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94494 -summary: Upgrade to lucene-9.6.0-snapshot-f5d1e1c787c -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/94500.yaml b/docs/changelog/94500.yaml deleted file mode 100644 index 076c951771843..0000000000000 --- a/docs/changelog/94500.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94500 -summary: Add global ordinal info to stats APIs -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/94504.yaml b/docs/changelog/94504.yaml deleted file mode 100644 index 9d5874daf1a53..0000000000000 --- a/docs/changelog/94504.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94504 -summary: Index sequence numbers via a single Lucene field -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/94506.yaml b/docs/changelog/94506.yaml deleted file mode 100644 index 
52ef41baeeb4a..0000000000000 --- a/docs/changelog/94506.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94506 -summary: Add permissions to kibana_system for TI package transforms to support IOC expiration -area: Authorization -type: enhancement -issues: - - 94505 diff --git a/docs/changelog/94529.yaml b/docs/changelog/94529.yaml deleted file mode 100644 index 8c810e090f794..0000000000000 --- a/docs/changelog/94529.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94529 -summary: Add `_meta` field to data frame analytics config -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/94540.yaml b/docs/changelog/94540.yaml deleted file mode 100644 index b1b0c923b16c7..0000000000000 --- a/docs/changelog/94540.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94540 -summary: Cut over from Field to `StringField` when applicable -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/94543.yaml b/docs/changelog/94543.yaml deleted file mode 100644 index 0a3d3ff20e74d..0000000000000 --- a/docs/changelog/94543.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94543 -summary: Report transport message size per action -area: Network -type: enhancement -issues: - - 88151 diff --git a/docs/changelog/94552.yaml b/docs/changelog/94552.yaml deleted file mode 100644 index 0803d76bee6be..0000000000000 --- a/docs/changelog/94552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94552 -summary: Add new `ShardsCapacity` Health Indicator Service -area: Health -type: feature -issues: [] diff --git a/docs/changelog/94564.yaml b/docs/changelog/94564.yaml deleted file mode 100644 index 23a82825aee78..0000000000000 --- a/docs/changelog/94564.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94564 -summary: Add the ability to return the score of the named queries -area: Search -type: feature -issues: [29606] diff --git a/docs/changelog/94576.yaml b/docs/changelog/94576.yaml deleted file mode 100644 index 6bb59e406ab9d..0000000000000 --- a/docs/changelog/94576.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94576 -summary: Add support for custom endpoints in the Azure repository -area: Snapshot/Restore -type: enhancement -issues: - - 94537 diff --git a/docs/changelog/94590.yaml b/docs/changelog/94590.yaml deleted file mode 100644 index a9ab9b5bac1fb..0000000000000 --- a/docs/changelog/94590.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94590 -summary: Add new endpoints to configure data lifecycle on a data stream level -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/94621.yaml b/docs/changelog/94621.yaml deleted file mode 100644 index 95221e6af0520..0000000000000 --- a/docs/changelog/94621.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94621 -summary: Introduce a _lifecycle/explain API for data stream backing indices -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/94635.yaml b/docs/changelog/94635.yaml deleted file mode 100644 index 891ffa1ac498f..0000000000000 --- a/docs/changelog/94635.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94635 -summary: Upgrade to lucene-9.6.0-snapshot-8a815153fbe -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/94649.yaml b/docs/changelog/94649.yaml deleted file mode 100644 index 6e2c587f07876..0000000000000 --- a/docs/changelog/94649.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94649 -summary: Don't create many Rounding.Prepared instances when checking for empty buckets in date_histogram aggregator. 
-area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/94669.yaml b/docs/changelog/94669.yaml deleted file mode 100644 index c9bb3d8e06632..0000000000000 --- a/docs/changelog/94669.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94669 -summary: Add `transport_version` to node info JSON -area: Infra/Transport API -type: enhancement -issues: [] diff --git a/docs/changelog/94677.yaml b/docs/changelog/94677.yaml deleted file mode 100644 index 7d5711f82c1da..0000000000000 --- a/docs/changelog/94677.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94677 -summary: Fix race condition in `NodeEnvironment.close()` -area: Infra/Core -type: bug -issues: - - 94672 diff --git a/docs/changelog/94680.yaml b/docs/changelog/94680.yaml deleted file mode 100644 index 98f7f358dd53e..0000000000000 --- a/docs/changelog/94680.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94680 -summary: Allow preserving specific headers on thread context stash -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/94714.yaml b/docs/changelog/94714.yaml deleted file mode 100644 index 9eddbb9462f15..0000000000000 --- a/docs/changelog/94714.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94714 -summary: Fix role transformation to include missing properties -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/94724.yaml b/docs/changelog/94724.yaml deleted file mode 100644 index a821fe1fa71e4..0000000000000 --- a/docs/changelog/94724.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94724 -summary: Expose authorization failure as transform health issue -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/94734.yaml b/docs/changelog/94734.yaml deleted file mode 100644 index aea8fe534a37f..0000000000000 --- a/docs/changelog/94734.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94734 -summary: Fix bug where `geo_line` does not respect `sort_order` -area: Geo -type: bug -issues: - - 94733 diff --git a/docs/changelog/94828.yaml b/docs/changelog/94828.yaml deleted file mode 100644 index 49fb3c02b2ef1..0000000000000 --- a/docs/changelog/94828.yaml +++ /dev/null @@ -1,23 +0,0 @@ -pr: 94828 -summary: Add new `similarity` field to `knn` clause in `_search` -area: Search -type: feature -issues: [] -highlight: - title: Add new `similarity` field to `knn` clause in `_search` - body: |- - This adds a new parameter to `knn` that allows filtering nearest - neighbor results that are outside a given similarity. - - `num_candidates` and `k` are still required as this controls the - nearest-neighbor vector search accuracy and exploration. For each shard - the query will search `num_candidates` and only keep those that are - within the provided `similarity` boundary, and then finally reduce to - only the global top `k` as normal. - - For example, when using the `l2_norm` indexed similarity value, this - could be considered a `radius` post-filter on `knn`. 
- - relates to: https://github.com/elastic/elasticsearch/issues/84929 && - https://github.com/elastic/elasticsearch/pull/93574 - notable: true diff --git a/docs/changelog/94832.yaml b/docs/changelog/94832.yaml deleted file mode 100644 index a3d94bb2cd2d5..0000000000000 --- a/docs/changelog/94832.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94832 -summary: Support search template api explain query string argument -area: Search -type: bug -issues: - - 83363 diff --git a/docs/changelog/94842.yaml b/docs/changelog/94842.yaml deleted file mode 100644 index 25a37d83b8d15..0000000000000 --- a/docs/changelog/94842.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94842 -summary: Flattened field synthetic support -area: TSDB -type: feature -issues: [] diff --git a/docs/changelog/94858.yaml b/docs/changelog/94858.yaml deleted file mode 100644 index a844d36a65dd7..0000000000000 --- a/docs/changelog/94858.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94858 -summary: Leverage Weight#count when size is set to 0 -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/94861.yaml b/docs/changelog/94861.yaml deleted file mode 100644 index 9d03c48dd6a37..0000000000000 --- a/docs/changelog/94861.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94861 -summary: "[DLM] Extend the template to simulate api to support include defaults" -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/94879.yaml b/docs/changelog/94879.yaml deleted file mode 100644 index 0664140f592b2..0000000000000 --- a/docs/changelog/94879.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94879 -summary: Sort ILM explain output by natural index name -area: ILM+SLM -type: enhancement -issues: - - 94768 diff --git a/docs/changelog/94889.yaml b/docs/changelog/94889.yaml deleted file mode 100644 index 981cedc79c6bb..0000000000000 --- a/docs/changelog/94889.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94889 -summary: Shortcut total hit count when `terminate_after` is used -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/94890.yaml b/docs/changelog/94890.yaml deleted file mode 100644 index 56dfb58615f37..0000000000000 --- a/docs/changelog/94890.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94890 -summary: Stop sorting indices in get-snapshots API -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/94943.yaml b/docs/changelog/94943.yaml deleted file mode 100644 index a371fd7c71a44..0000000000000 --- a/docs/changelog/94943.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94943 -summary: Allow specifying destination index aliases in the Transform's `dest` config -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/94950.yaml b/docs/changelog/94950.yaml deleted file mode 100644 index d8278b365c730..0000000000000 --- a/docs/changelog/94950.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94950 -summary: Dlm add auto rollover condition max age -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/94955.yaml b/docs/changelog/94955.yaml deleted file mode 100644 index 1538405399619..0000000000000 --- a/docs/changelog/94955.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94955 -summary: Upgrade to lucene-9.6-snapshot-dcc2154a1d3 -area: Engine -type: upgrade -issues: [] diff --git a/docs/changelog/94965.yaml b/docs/changelog/94965.yaml deleted file mode 100644 index e7009bbf21e33..0000000000000 --- a/docs/changelog/94965.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 94965 -summary: Retry downsample ILM action using a new target index -area: "ILM+SLM" -type: bug -issues: - - 93580 diff --git a/docs/changelog/94998.yaml 
b/docs/changelog/94998.yaml deleted file mode 100644 index 99ce2dad32b89..0000000000000 --- a/docs/changelog/94998.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 94998 -summary: Retain underlying error on proxy mode connection failure -area: "Network" -type: enhancement -issues: [] diff --git a/docs/changelog/95003.yaml b/docs/changelog/95003.yaml deleted file mode 100644 index b3d501c08c92c..0000000000000 --- a/docs/changelog/95003.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95003 -summary: Fix Grok.match() with offset and suffix pattern -area: Ingest Node -type: bug -issues: - - 95002 diff --git a/docs/changelog/95026.yaml b/docs/changelog/95026.yaml deleted file mode 100644 index 9b098701ef668..0000000000000 --- a/docs/changelog/95026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95026 -summary: Initial Search Application Search API with templates -area: Application -type: feature -issues: [] diff --git a/docs/changelog/95027.yaml b/docs/changelog/95027.yaml deleted file mode 100644 index 8fd1fa7bd849f..0000000000000 --- a/docs/changelog/95027.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95027 -summary: "Implements behavioral analytics events ingest API" -area: Search -type: feature -issues: [] diff --git a/docs/changelog/95068.yaml b/docs/changelog/95068.yaml deleted file mode 100644 index b2944491d54ae..0000000000000 --- a/docs/changelog/95068.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95068 -summary: "[Ingest Processor] Add `ignore_missing` param to the `uri_parts` ingest\ - \ processor" -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/95082.yaml b/docs/changelog/95082.yaml deleted file mode 100644 index 1bd1b46b2b098..0000000000000 --- a/docs/changelog/95082.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95082 -summary: Make `SourceProvider` using stored fields segment-thread-safe -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/95113.yaml b/docs/changelog/95113.yaml deleted file mode 100644 index 37a5ae21a03d6..0000000000000 --- a/docs/changelog/95113.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95113 -summary: Adding origination date to DLM -area: DLM -type: enhancement -issues: [] diff --git a/docs/changelog/95114.yaml b/docs/changelog/95114.yaml deleted file mode 100644 index 23cc4d7ae55be..0000000000000 --- a/docs/changelog/95114.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95114 -summary: Fix `RecyclerBytesStreamOutput` corrupting when ending write on page boundary -area: Network -type: bug -issues: [] diff --git a/docs/changelog/95133.yaml b/docs/changelog/95133.yaml deleted file mode 100644 index 047c3059599d6..0000000000000 --- a/docs/changelog/95133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95133 -summary: Fix maximum seek limit `RecyclerBytesStreamOutput` -area: Network -type: bug -issues: [] diff --git a/docs/changelog/95168.yaml b/docs/changelog/95168.yaml deleted file mode 100644 index f71557439761d..0000000000000 --- a/docs/changelog/95168.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95168 -summary: "Start, stop and infer of a trained model can now optionally use a deployment ID that is different to the model ID" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/95170.yaml b/docs/changelog/95170.yaml deleted file mode 100644 index dec6a70b6958b..0000000000000 --- a/docs/changelog/95170.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95170 -summary: Ensure checking indices privileges works with nested-limited-role -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/95176.yaml b/docs/changelog/95176.yaml deleted 
file mode 100644 index fb40bddf59352..0000000000000 --- a/docs/changelog/95176.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95176 -summary: Add `embedding_size` to text embedding config -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/95187.yaml b/docs/changelog/95187.yaml deleted file mode 100644 index 776e9d3be9c62..0000000000000 --- a/docs/changelog/95187.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95187 -summary: Fix privileges check failures by adding `allow_restricted_indices` flag -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/95198.yaml b/docs/changelog/95198.yaml deleted file mode 100644 index 38555a2dcbb92..0000000000000 --- a/docs/changelog/95198.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95198 -summary: "[Behavioral Analytics] Add a `final_pipeline` to event data streams" -area: Application -type: feature -issues: [] diff --git a/docs/changelog/95212.yaml b/docs/changelog/95212.yaml deleted file mode 100644 index e2e5637a5dd5c..0000000000000 --- a/docs/changelog/95212.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95212 -summary: "[Behavioral analytics] Implement search filters into events" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/95221.yaml b/docs/changelog/95221.yaml deleted file mode 100644 index f670e9ddc5641..0000000000000 --- a/docs/changelog/95221.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95221 -summary: Longer timeout for mapping update during resize -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/95229.yaml b/docs/changelog/95229.yaml deleted file mode 100644 index 22e9bf712601a..0000000000000 --- a/docs/changelog/95229.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95229 -summary: Avoid null Location in post write refresh -area: CRUD -type: bug -issues: [] diff --git a/docs/changelog/95232.yaml b/docs/changelog/95232.yaml deleted file mode 100644 index 02ad9e0d0c567..0000000000000 --- a/docs/changelog/95232.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95232 -summary: Simulate ingest pipeline API verbose bug -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/95257.yaml b/docs/changelog/95257.yaml deleted file mode 100644 index 236b06cf0b896..0000000000000 --- a/docs/changelog/95257.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95257 -summary: Increase max number of vector dims to 2048 -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/95271.yaml b/docs/changelog/95271.yaml deleted file mode 100644 index 0507c4d7062f6..0000000000000 --- a/docs/changelog/95271.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95271 -summary: Include model definition install status for Pytorch models -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/95273.yaml b/docs/changelog/95273.yaml deleted file mode 100644 index 0b5e5f7a4e537..0000000000000 --- a/docs/changelog/95273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95273 -summary: Support flattened fields as time series dimension fields -area: TSDB -type: feature -issues: [] diff --git a/docs/changelog/95281.yaml b/docs/changelog/95281.yaml deleted file mode 100644 index e7b89c55d1819..0000000000000 --- a/docs/changelog/95281.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95281 -summary: Integrate ELSER model download into put trained model API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/95342.yaml b/docs/changelog/95342.yaml deleted file mode 100644 index 5814815b6b55d..0000000000000 --- a/docs/changelog/95342.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95342 -summary: Add 
Watcher APIs for updating/retrieving settings -area: Watcher -type: enhancement -issues: - - 92991 diff --git a/docs/changelog/95351.yaml b/docs/changelog/95351.yaml deleted file mode 100644 index e8cf146229625..0000000000000 --- a/docs/changelog/95351.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95351 -summary: Read register current term asynchronously in `StoreHeartbeatService` -area: Cluster Coordination -type: bug -issues: [] diff --git a/docs/changelog/95357.yaml b/docs/changelog/95357.yaml deleted file mode 100644 index 66c75248e1798..0000000000000 --- a/docs/changelog/95357.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95357 -summary: Add `WildcardLike/Pattern` to QL -area: SQL -type: enhancement -issues: [] diff --git a/docs/changelog/95359.yaml b/docs/changelog/95359.yaml deleted file mode 100644 index 39c8b5e865b9e..0000000000000 --- a/docs/changelog/95359.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95359 -summary: Bump bundled JDK to Java 20.0.1 -area: Packaging -type: upgrade -issues: [] diff --git a/docs/changelog/95388.yaml b/docs/changelog/95388.yaml deleted file mode 100644 index dd871ae5c771e..0000000000000 --- a/docs/changelog/95388.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95388 -summary: Add `event_loop_utilization` Kibana stats to the monitoring index templates -area: Monitoring -type: enhancement -issues: [] diff --git a/docs/changelog/95398.yaml b/docs/changelog/95398.yaml deleted file mode 100644 index 45412ec537750..0000000000000 --- a/docs/changelog/95398.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 95398 -summary: GA release of the JWT realm -area: Authentication -type: feature -issues: [] -highlight: - title: GA release of the JWT realm - body: This PR removes the beta label for JWT realm feature to make it GA. - notable: true diff --git a/docs/changelog/95405.yaml b/docs/changelog/95405.yaml deleted file mode 100644 index e835fa0503e76..0000000000000 --- a/docs/changelog/95405.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95405 -summary: Behavioral Analytics event ingest tuning -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/95423.yaml b/docs/changelog/95423.yaml deleted file mode 100644 index edfa2ab937bb9..0000000000000 --- a/docs/changelog/95423.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95423 -summary: Introduce the `index.lifecycle.prefer_ilm` setting -area: DLM -type: feature -issues: [] diff --git a/docs/changelog/95433.yaml b/docs/changelog/95433.yaml deleted file mode 100644 index 65cdb3b1ed952..0000000000000 --- a/docs/changelog/95433.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95433 -summary: "[Behavioral Analytics] Add geo ip and user agent to events" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/95440.yaml b/docs/changelog/95440.yaml deleted file mode 100644 index e6b888d55bff8..0000000000000 --- a/docs/changelog/95440.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95440 -summary: "[ML] Get trained model stats by deployment id or model id" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/95454.yaml b/docs/changelog/95454.yaml deleted file mode 100644 index 38611ea816abf..0000000000000 --- a/docs/changelog/95454.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95454 -summary: Balance priorities during reconciliation -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/95456.yaml b/docs/changelog/95456.yaml deleted file mode 100644 index 31373fa7e6f62..0000000000000 --- a/docs/changelog/95456.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95456 -summary: Use monotonic time in `TransformScheduler` 
-area: Transform -type: bug -issues: - - 95445 diff --git a/docs/changelog/95477.yaml b/docs/changelog/95477.yaml deleted file mode 100644 index 6289facb12ce8..0000000000000 --- a/docs/changelog/95477.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95477 -summary: Add license checking to the redact processor -area: Ingest Node -type: feature -issues: [] diff --git a/docs/changelog/95512.yaml b/docs/changelog/95512.yaml new file mode 100644 index 0000000000000..2414f9a7809b0 --- /dev/null +++ b/docs/changelog/95512.yaml @@ -0,0 +1,5 @@ +pr: 95512 +summary: Adding `manage_dlm` index privilege and expanding `view_index_metadata` for access to data lifecycle APIs +area: DLM +type: enhancement +issues: [] diff --git a/docs/changelog/95514.yaml b/docs/changelog/95514.yaml deleted file mode 100644 index 8e87ce66c4835..0000000000000 --- a/docs/changelog/95514.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95514 -summary: Fix versioning for tests cases using a randomly generated rank builder -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/95527.yaml b/docs/changelog/95527.yaml deleted file mode 100644 index 2c85521361644..0000000000000 --- a/docs/changelog/95527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95527 -summary: Allow deletion of component templates that are specified in the `ignore_missing_component_templates` array -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/95557.yaml b/docs/changelog/95557.yaml deleted file mode 100644 index d45207860de48..0000000000000 --- a/docs/changelog/95557.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95557 -summary: Revert usage of `SafeMustacheFactory` in `CustomMustacheFactory` -area: Infra/Scripting -type: bug -issues: [] diff --git a/docs/changelog/95590.yaml b/docs/changelog/95590.yaml deleted file mode 100644 index 8d31c098977fd..0000000000000 --- a/docs/changelog/95590.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95590 -summary: Avoid expensive source parsing by using doc values when querying model definition meta fields -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/95596.yaml b/docs/changelog/95596.yaml deleted file mode 100644 index 53af444516610..0000000000000 --- a/docs/changelog/95596.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95596 -summary: "[Fleet] Add read privileges to profiling-* for symbolization support" -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/95614.yaml b/docs/changelog/95614.yaml deleted file mode 100644 index 9df93fd865c20..0000000000000 --- a/docs/changelog/95614.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95614 -summary: "[Behavioral Analytics] Use a a client with ent-search origin in the `BulkProcessorFactory`" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/95621.yaml b/docs/changelog/95621.yaml deleted file mode 100644 index 57dd56da37dc9..0000000000000 --- a/docs/changelog/95621.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95621 -summary: Check if an analytics event data stream exists before installing pipeline -area: Application -type: bug -issues: [] diff --git a/docs/changelog/95641.yaml b/docs/changelog/95641.yaml deleted file mode 100644 index 5a843a406ea24..0000000000000 --- a/docs/changelog/95641.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95641 -summary: Upgrade Jackson xml to 2.15.0 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/95665.yaml b/docs/changelog/95665.yaml deleted file mode 100644 index 59e99db23b51b..0000000000000 --- a/docs/changelog/95665.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95665 -summary: "[DLM] Fix 
the new endpoint rest-api specification" -area: DLM -type: bug -issues: [] diff --git a/docs/changelog/95705.yaml b/docs/changelog/95705.yaml new file mode 100644 index 0000000000000..dcd33dbcc61d8 --- /dev/null +++ b/docs/changelog/95705.yaml @@ -0,0 +1,6 @@ +pr: 95705 +summary: Avoid stack overflow while parsing mapping +area: Mapping +type: bug +issues: + - 52098 diff --git a/docs/changelog/95767.yaml b/docs/changelog/95767.yaml deleted file mode 100644 index 524cf8c9630eb..0000000000000 --- a/docs/changelog/95767.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95767 -summary: Strip disallowed chars from generated snapshot name -area: ILM+SLM -type: bug -issues: - - 95593 diff --git a/docs/changelog/95854.yaml b/docs/changelog/95854.yaml deleted file mode 100644 index fb63a09a6b1c6..0000000000000 --- a/docs/changelog/95854.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95854 -summary: Fix 0 default value for repo snapshot speed -area: Snapshot/Restore -type: bug -issues: - - 95561 diff --git a/docs/changelog/95865.yaml b/docs/changelog/95865.yaml deleted file mode 100644 index 7a70c9dd0886a..0000000000000 --- a/docs/changelog/95865.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95865 -summary: Fix searching a filtered and unfiltered data stream alias -area: Data streams -type: bug -issues: - - 95786 diff --git a/docs/changelog/95891.yaml b/docs/changelog/95891.yaml deleted file mode 100644 index 9c33314dd16e6..0000000000000 --- a/docs/changelog/95891.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 95891 -summary: Cancel cold cache prewarming tasks if store is closing -area: Snapshot/Restore -type: bug -issues: - - 95504 diff --git a/docs/changelog/95895.yaml b/docs/changelog/95895.yaml deleted file mode 100644 index b5ddb7c278dec..0000000000000 --- a/docs/changelog/95895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95895 -summary: "Move synonyms set code to server, add PUT request" -area: Analysis -type: enhancement -issues: [] diff --git a/docs/changelog/95934.yaml b/docs/changelog/95934.yaml deleted file mode 100644 index ee4d72f4c8b25..0000000000000 --- a/docs/changelog/95934.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95934 -summary: "[ILM] Fix the migrate to tiers service and migrate action tiers configuration" -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/95967.yaml b/docs/changelog/95967.yaml deleted file mode 100644 index 9cc28d871fc20..0000000000000 --- a/docs/changelog/95967.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 95967 -summary: Upgrade Lucene to the final 9.6.0 release -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/96015.yaml b/docs/changelog/96015.yaml deleted file mode 100644 index d47fc7ddce001..0000000000000 --- a/docs/changelog/96015.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 96015 -summary: Check shard availability before including in stats -area: Distributed -type: bug -issues: - - 96000 - - 87001 diff --git a/docs/changelog/96025.yaml b/docs/changelog/96025.yaml deleted file mode 100644 index 122d26e257938..0000000000000 --- a/docs/changelog/96025.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96025 -summary: Fix `RebalanceOnlyWhenActiveAllocationDecider` -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/96034.yaml b/docs/changelog/96034.yaml deleted file mode 100644 index 4f048fcfc4f8e..0000000000000 --- a/docs/changelog/96034.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96034 -summary: Fix Azure `InputStream#read` method -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/96088.yaml b/docs/changelog/96088.yaml deleted file mode 100644 
index 9f0e7a88c4727..0000000000000 --- a/docs/changelog/96088.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96088 -summary: Adjust `BoundedGeoHexGridTiler#FACTOR` to prevent missing hits -area: Geo -type: bug -issues: - - 96057 diff --git a/docs/changelog/96177.yaml b/docs/changelog/96177.yaml new file mode 100644 index 0000000000000..d832a99019fda --- /dev/null +++ b/docs/changelog/96177.yaml @@ -0,0 +1,5 @@ +pr: 96177 +summary: Adding `data_lifecycle` to the _xpack/usage API +area: DLM +type: enhancement +issues: [] diff --git a/docs/changelog/96221.yaml b/docs/changelog/96221.yaml deleted file mode 100644 index 8df71102071de..0000000000000 --- a/docs/changelog/96221.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96221 -summary: Call listener in order to prevent the request from hanging -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/96272.yaml b/docs/changelog/96272.yaml new file mode 100644 index 0000000000000..739983e8f9d46 --- /dev/null +++ b/docs/changelog/96272.yaml @@ -0,0 +1,5 @@ +pr: 96272 +summary: "[Profiling] Add status API" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/96274.yaml b/docs/changelog/96274.yaml deleted file mode 100644 index 61e09c15183bf..0000000000000 --- a/docs/changelog/96274.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 96274 -summary: Do not fail upon `ResourceAlreadyExistsException` during destination index - creation -area: Transform -type: bug -issues: - - 95310 diff --git a/docs/changelog/96279.yaml b/docs/changelog/96279.yaml new file mode 100644 index 0000000000000..39c14d64e34a4 --- /dev/null +++ b/docs/changelog/96279.yaml @@ -0,0 +1,5 @@ +pr: 96279 +summary: Improve cancellability in `TransportTasksAction` +area: Task Management +type: bug +issues: [] diff --git a/docs/changelog/96286.yaml b/docs/changelog/96286.yaml new file mode 100644 index 0000000000000..2b128bb59c35f --- /dev/null +++ b/docs/changelog/96286.yaml @@ -0,0 +1,6 @@ +pr: 96286 +summary: Allow the removal of an in-use template if there are other ones matching + the dependent data streams +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/96328.yaml b/docs/changelog/96328.yaml new file mode 100644 index 0000000000000..91f83959a76d6 --- /dev/null +++ b/docs/changelog/96328.yaml @@ -0,0 +1,6 @@ +pr: 96328 +summary: Add `ingest` information to the cluster info endpoint +area: Stats +type: enhancement +issues: + - 95392 diff --git a/docs/changelog/96340.yaml b/docs/changelog/96340.yaml new file mode 100644 index 0000000000000..c0fc9133d7370 --- /dev/null +++ b/docs/changelog/96340.yaml @@ -0,0 +1,5 @@ +pr: 96340 +summary: Chunk profiling stacktrace response +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/96394.yaml b/docs/changelog/96394.yaml new file mode 100644 index 0000000000000..f23c81e97b7f9 --- /dev/null +++ b/docs/changelog/96394.yaml @@ -0,0 +1,6 @@ +pr: 96394 +summary: Allow unsigned long field to use decay functions +area: Mapping +type: enhancement +issues: + - 89603 diff --git a/docs/changelog/96399.yaml b/docs/changelog/96399.yaml new file mode 100644 index 0000000000000..d553795ea16a0 --- /dev/null +++ b/docs/changelog/96399.yaml @@ -0,0 +1,5 @@ +pr: 96399 +summary: Reduce overhead in blob cache service get +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/96406.yaml b/docs/changelog/96406.yaml new file mode 100644 index 0000000000000..8ceddff3dfbef --- /dev/null +++ b/docs/changelog/96406.yaml @@ -0,0 +1,5 @@ +pr: 96406 +summary: Fix tchar pattern in 
`RestRequest` +area: Infra/REST API +type: bug +issues: []
diff --git a/docs/changelog/96421.yaml b/docs/changelog/96421.yaml new file mode 100644 index 0000000000000..a628b3b157f98 --- /dev/null +++ b/docs/changelog/96421.yaml @@ -0,0 +1,6 @@ +pr: 96421 +summary: Promptly fail recovery from snapshot +area: Recovery +type: bug +issues: + - 95525
diff --git a/docs/changelog/96433.yaml b/docs/changelog/96433.yaml new file mode 100644 index 0000000000000..04e1e9451637b --- /dev/null +++ b/docs/changelog/96433.yaml @@ -0,0 +1,5 @@ +pr: 96433 +summary: Upgrade Lucene to a 9.7.0 snapshot +area: Search +type: upgrade +issues: []
diff --git a/docs/reference/cluster/cluster-info.asciidoc b/docs/reference/cluster/cluster-info.asciidoc index a2a57311df4f5..bc736d48bc6e8 100644 --- a/docs/reference/cluster/cluster-info.asciidoc +++ b/docs/reference/cluster/cluster-info.asciidoc @@ -33,9 +33,14 @@ You can use the Cluster Info API to retrieve information of a cluster. A comma-separated list of the following options: + -- +`_all`:: +All the information available. Cannot be mixed with other targets. + `http`:: HTTP connection information. +`ingest`:: +Ingest information. -- [role="child_attributes"] @@ -126,6 +131,114 @@ Cumulative size in bytes of all requests from this client. ====== +[[cluster-info-api-response-body-ingest]] +`ingest`:: +(object) +Contains ingest information for the cluster. ++ +.Properties of `ingest` +[%collapsible%open] +====== +`total`:: +(object) +Contains information about ingest operations for the cluster. ++ +.Properties of `total` +[%collapsible%open] +======= +`count`:: +(integer) +Total number of documents ingested across the cluster. + +`time`:: +(<>) +Total time spent preprocessing ingest documents across the cluster. + +`time_in_millis`:: +(integer) +Total time, in milliseconds, spent preprocessing ingest documents across the cluster. + +`current`:: +(integer) +Total number of documents currently being ingested. + +`failed`:: +(integer) +Total number of failed ingest operations across the cluster. +======= + +`pipelines`:: +(object) +Contains information about ingest pipelines for the cluster. ++ +.Properties of `pipelines` +[%collapsible%open] +======= +``:: +(object) +Contains information about the ingest pipeline. ++ +.Properties of `` +[%collapsible%open] +======== +`count`:: +(integer) +Number of documents preprocessed by the ingest pipeline. + +`time`:: +(<>) +Total time spent preprocessing documents in the ingest pipeline. + +`time_in_millis`:: +(integer) +Total time, in milliseconds, spent preprocessing documents in the ingest +pipeline. + +`failed`:: +(integer) +Total number of failed operations for the ingest pipeline. + +`processors`:: +(array of objects) +Contains information for the ingest processors for the ingest pipeline. ++ +.Properties of `processors` +[%collapsible%open] +========= +``:: +(object) +Contains information for the ingest processor. ++ +.Properties of `` +[%collapsible%open] +========== +`count`:: +(integer) +Number of documents transformed by the processor. + +`time`:: +(<>) +Time spent by the processor transforming documents. + +`time_in_millis`:: +(integer) +Time, in milliseconds, spent by the processor transforming documents. + +`current`:: +(integer) +Number of documents currently being transformed by the processor. + +`failed`:: +(integer) +Number of failed operations for the processor. +========== +========= +======== +======= +====== + + +
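The response-body documentation above translates directly into a client call. A minimal sketch with the low-level Java REST client, assuming a node listening on localhost:9200 (the host, port, and plain-text printing are illustrative; the `/_info/ingest` path and the `total`/`pipelines` fields are the ones documented above):

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.nio.charset.StandardCharsets;

public class ClusterInfoIngestExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // `ingest` is one of the documented targets; `_all` cannot be mixed with other targets.
            Response response = client.performRequest(new Request("GET", "/_info/ingest"));
            // The JSON body holds ingest.total.{count,time_in_millis,current,failed} plus the
            // per-pipeline and per-processor breakdown described in the properties above.
            byte[] body = response.getEntity().getContent().readAllBytes();
            System.out.println(new String(body, StandardCharsets.UTF_8));
        }
    }
}
----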
[[cluster-info-api-example]] ==== {api-examples-title} @@ -133,10 +246,13 @@ Cumulative size in bytes of all requests from this client. ---- # returns all stats info of the cluster GET /_info/_all ----- -[source,console] ----- # returns the http info of the cluster GET /_info/http + +# returns the ingest info of the cluster +GET /_info/ingest + +# returns the http and ingest info of the cluster +GET /_info/http,ingest ----
diff --git a/docs/reference/data-streams/tsds-index-settings.asciidoc b/docs/reference/data-streams/tsds-index-settings.asciidoc index 333cf9fdcc998..fa5c9b8cd821f 100644 --- a/docs/reference/data-streams/tsds-index-settings.asciidoc +++ b/docs/reference/data-streams/tsds-index-settings.asciidoc @@ -37,14 +37,15 @@ can not be less than `time_series.poll_interval` cluster setting. (<<_static_index_settings,Static>>, string or array of strings) Plain `keyword` fields used to route documents in a TSDS to index shards. Supports wildcards (`*`). Only indices with an `index.mode` of `time_series` support this setting. -Defaults to the list of <> with a -`time_series_dimension` value of `true` defined in your component templates. For -more information, refer to <>. +Defaults to an empty list, except for data streams, where it defaults to the list +of <> with a `time_series_dimension` +value of `true` defined in your component and index templates. For more +information, refer to <>. [[index-mapping-dimension-fields-limit]] // tag::dimensions-limit[] `index.mapping.dimension_fields.limit`:: (<>, integer) Maximum number of <> for the -index. Defaults to `16`. +index. Defaults to `21`. // end::dimensions-limit[]
diff --git a/docs/reference/dlm/apis/delete-lifecycle.asciidoc b/docs/reference/dlm/apis/delete-lifecycle.asciidoc index aec0d51050f3e..df0ae137b0838 100644 --- a/docs/reference/dlm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/dlm/apis/delete-lifecycle.asciidoc @@ -8,6 +8,12 @@ experimental::[] Deletes the lifecycle from a set of data streams. +[[delete-lifecycle-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `manage_dlm` index privilege or higher to +use this API. For more information, see <>. + [[dlm-delete-lifecycle-request]] ==== {api-request-title}
diff --git a/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc b/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc index 19c1b203a7ce8..8f477bb6d1e3f 100644 --- a/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc +++ b/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc @@ -8,6 +8,12 @@ experimental::[] Retrieves the current data lifecycle status for one or more data stream backing indices. +[[explain-lifecycle-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have at least the `manage_dlm` index privilege or +`view_index_metadata` index privilege to use this API. For more information, see <>. + [[dlm-explain-lifecycle-request]] ==== {api-request-title}
diff --git a/docs/reference/dlm/apis/get-lifecycle.asciidoc b/docs/reference/dlm/apis/get-lifecycle.asciidoc index d45bb34ef016d..f600c27e684b1 100644 --- a/docs/reference/dlm/apis/get-lifecycle.asciidoc +++ b/docs/reference/dlm/apis/get-lifecycle.asciidoc @@ -8,6 +8,13 @@ experimental::[] Gets the lifecycle of a set of data streams.
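Like the other DLM endpoints in this change, get-lifecycle is plain REST. A minimal sketch with the low-level Java client, assuming a data stream named `my-data-stream` already exists (the name and client setup are illustrative; the `_data_stream/<name>/_lifecycle` path is the same one exercised by `DlmPermissionsRestIT` later in this diff):

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class GetLifecycleExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Wildcards and _all are accepted in place of a concrete data stream name.
            Request request = new Request("GET", "/_data_stream/my-data-stream/_lifecycle");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
----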
+[[get-lifecycle-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have at least one of the `manage` +<>, the `manage_dlm` index privilege, or the +`view_index_metadata` privilege to use this API. For more information, see <>. + [[dlm-get-lifecycle-request]] ==== {api-request-title}
diff --git a/docs/reference/dlm/apis/put-lifecycle.asciidoc b/docs/reference/dlm/apis/put-lifecycle.asciidoc index d2abece88a177..ceca3d0478155 100644 --- a/docs/reference/dlm/apis/put-lifecycle.asciidoc +++ b/docs/reference/dlm/apis/put-lifecycle.asciidoc @@ -8,6 +8,12 @@ experimental::[] Configures the data lifecycle for the targeted data streams. +[[put-lifecycle-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the `manage_dlm` index privilege or higher to use this API. +For more information, see <>. + [[dlm-put-lifecycle-request]] ==== {api-request-title}
diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 658920bef7376..165ff74825113 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -115,8 +115,9 @@ integer values between -128 and 127, inclusive, for both indexing and searching. Number of vector dimensions. Can't exceed `1024` for indexed vectors (`"index": true`), or `2048` for non-indexed vectors. -experimental::[] -Number of dimensions for indexed vectors can be extended up to `2048`. ++ +experimental:[] +The number of dimensions for indexed vectors can be extended up to `2048`. `index`:: (Optional, Boolean)
diff --git a/docs/reference/migration/migrate_8_8.asciidoc b/docs/reference/migration/migrate_8_8.asciidoc index 909a05916c1e1..85f934111ed79 100644 --- a/docs/reference/migration/migrate_8_8.asciidoc +++ b/docs/reference/migration/migrate_8_8.asciidoc @@ -9,8 +9,6 @@ your application to {es} 8.8. See also <> and <>. -coming::[8.8.0] - [discrete] [[breaking-changes-8.8]]
diff --git a/docs/reference/modules/network/threading.asciidoc b/docs/reference/modules/network/threading.asciidoc index 2e26ae89dbdea..87e7e2371472b 100644 --- a/docs/reference/modules/network/threading.asciidoc +++ b/docs/reference/modules/network/threading.asciidoc @@ -63,7 +63,7 @@ reported like this: [source,text] ---- - 100.0% [cpu=0.0%, other=100.0%] (500ms out of 500ms) cpu usage by thread 'elasticsearch[instance-0000000004][transport_worker][T#1]' + 0.0% [cpu=0.0%, idle=100.0%] (500ms out of 500ms) cpu usage by thread 'elasticsearch[instance-0000000004][transport_worker][T#1]' 10/10 snapshots sharing following 9 elements java.base@17.0.2/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.2/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) @@ -77,11 +77,9 @@ reported like this: ---- Note that `transport_worker` threads should always be in state `RUNNABLE`, even -when waiting for input, because they block in the native `EPoll#wait` method. -This means the hot threads API will report these threads at 100% overall -utilisation. This is normal, and the breakdown of time into `cpu=` and `other=` -fractions shows how much time the thread spent running and waiting for input -respectively. +when waiting for input, because they block in the native `EPoll#wait` method. The `idle=` +time reports the proportion of time the thread spent waiting for input, whereas the `cpu=` time +reports the proportion of time the thread spent processing input it has received.
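To make the new breakdown concrete, the two fractions can be read straight off the reported line. A small self-contained sketch (the parsing is illustrative only, not an Elasticsearch API):

[source,java]
----
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HotThreadsLineExample {
    public static void main(String[] args) {
        // The sample transport_worker line from the hot threads output above.
        String line = "0.0% [cpu=0.0%, idle=100.0%] (500ms out of 500ms) cpu usage by thread "
            + "'elasticsearch[instance-0000000004][transport_worker][T#1]'";
        // cpu= is time spent processing input; idle= is time blocked in EPoll#wait.
        Matcher m = Pattern.compile("cpu=([0-9.]+)%, idle=([0-9.]+)%").matcher(line);
        if (m.find()) {
            double cpu = Double.parseDouble(m.group(1));
            double idle = Double.parseDouble(m.group(2));
            // For a healthy transport_worker the two fractions cover the whole sample period.
            System.out.printf("cpu=%.1f%%, idle=%.1f%%, total=%.1f%%%n", cpu, idle, cpu + idle);
        }
    }
}
----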
If a `transport_worker` thread is not frequently idle, it may build up a backlog of work. This can cause delays in processing messages on the channels diff --git a/docs/reference/release-notes/8.6.0.asciidoc b/docs/reference/release-notes/8.6.0.asciidoc index f9fe3ee534f10..a9fe2fc77965f 100644 --- a/docs/reference/release-notes/8.6.0.asciidoc +++ b/docs/reference/release-notes/8.6.0.asciidoc @@ -11,6 +11,22 @@ include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] include::8.0.0.asciidoc[tag=jackson-filtering-bug] +// tag::reconciliation-imbalance-known-issue[] +* Shard rebalancing may temporarily unbalance cluster ++ +From 8.6.0 onwards the default shard rebalancing algorithm will compute the +final desired balance and then make shard movements to reconcile the current +state of the cluster with the desired state. However the order in which the +shard movements take place may be skewed towards certain nodes, causing the +cluster to become temporarily unbalanced while the reconciliation is ongoing. +As always, once a node reaches a disk watermark it will not accept any +additional shards, but this skew may result in nodes reaching their disk +watermarks more often than expected in normal operation. Once the +reconciliation process completes, the cluster will be balanced again. ++ +To avoid this problem, upgrade to 8.8.0 or later. +// end::reconciliation-imbalance-known-issue[] + [[bug-8.6.0]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.6.1.asciidoc b/docs/reference/release-notes/8.6.1.asciidoc index e83569690ce8b..f1bb8d6171e16 100644 --- a/docs/reference/release-notes/8.6.1.asciidoc +++ b/docs/reference/release-notes/8.6.1.asciidoc @@ -3,6 +3,12 @@ Also see <>. +[[known-issues-8.6.1]] +[float] +=== Known issues + +include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] + [[bug-8.6.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.6.2.asciidoc b/docs/reference/release-notes/8.6.2.asciidoc index 39c237df1eb6b..c4962e37fe5f2 100644 --- a/docs/reference/release-notes/8.6.2.asciidoc +++ b/docs/reference/release-notes/8.6.2.asciidoc @@ -3,6 +3,12 @@ Also see <>. +[[known-issues-8.6.2]] +[float] +=== Known issues + +include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] + [[bug-8.6.2]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.7.0.asciidoc b/docs/reference/release-notes/8.7.0.asciidoc index a6f3497c77ace..d0886a616e341 100644 --- a/docs/reference/release-notes/8.7.0.asciidoc +++ b/docs/reference/release-notes/8.7.0.asciidoc @@ -3,6 +3,12 @@ Also see <>. +[[known-issues-8.7.0]] +[float] +=== Known issues + +include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] + [[breaking-8.7.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.7.1.asciidoc b/docs/reference/release-notes/8.7.1.asciidoc index aa12d1fb1a4cd..a0513bc1a8f0e 100644 --- a/docs/reference/release-notes/8.7.1.asciidoc +++ b/docs/reference/release-notes/8.7.1.asciidoc @@ -3,6 +3,21 @@ Also see <>. +[[known-issues-8.7.1]] +[float] +=== Known issues + +* `ArrayIndexOutOfBoundsException` may be thrown while creating a transport message ++ +Certain sequences of writes and seeks to the buffer used to create a transport +message may encounter an alignment bug which results in an +`ArrayIndexOutOfBoundsException`, preventing the transport message from being +sent. ++ +This issue is fixed in 8.8.0. 
+ +include::8.6.0.asciidoc[tag=reconciliation-imbalance-known-issue] + [[bug-8.7.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.8.0.asciidoc b/docs/reference/release-notes/8.8.0.asciidoc index 838f6f97e01e2..da47bd5e386bb 100644 --- a/docs/reference/release-notes/8.8.0.asciidoc +++ b/docs/reference/release-notes/8.8.0.asciidoc @@ -1,8 +1,6 @@ [[release-notes-8.8.0]] == {es} version 8.8.0 -coming[8.8.0] - Also see <>. [[bug-8.8.0]] @@ -10,68 +8,98 @@ Also see <>. === Bug fixes Aggregations:: -* Merge two histograms usign the higher number of digits among all histograms {es-pull}93704[#93704] (issue: {es-issue}92822[#92822]) +* Merge two histograms using the higher number of digits among all histograms {es-pull}93704[#93704] (issue: {es-issue}92822[#92822]) Allocation:: * Avoid copying during iteration of all shards in routing table {es-pull}94417[#94417] -* Avoid duplicate application of RoutingTable diff {es-pull}94379[#94379] -* Streamline AsyncShardFetch#getNumberOfInFlightFetches {es-pull}93632[#93632] (issue: {es-issue}93631[#93631]) -* Use applied state after `DiskThresholdMonitor` reroute {es-pull}94916[#94916] +* Avoid duplicate application of `RoutingTable` diff {es-pull}94379[#94379] +* Balance priorities during reconciliation {es-pull}95454[#95454] +* Fix `RebalanceOnlyWhenActiveAllocationDecider` {es-pull}96025[#96025] +* Streamline `AsyncShardFetch#getNumberOfInFlightFetches` {es-pull}93632[#93632] (issue: {es-issue}93631[#93631]) + +Application:: +* Check if an analytics event data stream exists before installing pipeline {es-pull}95621[#95621] +* [Behavioral Analytics] Use a client with ent-search origin in the `BulkProcessorFactory` {es-pull}95614[#95614] Authorization:: * Fix role transformation to include missing properties {es-pull}94714[#94714] +* [Fleet] Add read privileges to `profiling-*` for symbolization support {es-pull}95596[#95596] + +CRUD:: +* Avoid null `Location` in post write refresh {es-pull}95229[#95229] + +Cluster Coordination:: +* Read register current term asynchronously in `StoreHeartbeatService` {es-pull}95351[#95351] DLM:: * Remove rollover cluster setting validator {es-pull}94447[#94447] +* [DLM] Fix the new endpoint rest-api specification {es-pull}95665[#95665] + +Data streams:: +* Allow deletion of component templates that are specified in the `ignore_missing_component_templates` array {es-pull}95527[#95527] +* Fix searching a filtered and unfiltered data stream alias {es-pull}95865[#95865] (issue: {es-issue}95786[#95786]) Distributed:: +* Check shard availability before including in stats {es-pull}96015[#96015] (issues: {es-issue}96000[#96000], {es-issue}87001[#87001]) * Fix `GetPipelineResponse` equality {es-pull}93695[#93695] Engine:: * Ensure refresh to return the latest commit generation {es-pull}94249[#94249] Geo:: +* Adjust `BoundedGeoHexGridTiler#FACTOR` to prevent missing hits {es-pull}96088[#96088] (issue: {es-issue}96057[#96057]) * Fix bug where `geo_line` does not respect `sort_order` {es-pull}94734[#94734] (issue: {es-issue}94733[#94733]) ILM+SLM:: -* Allow ILM to transition to implicit cached steps {es-pull}91779[#91779] (issue: {es-issue}91749[#91749]) -* Downsample ILM action should skip non-time-series indices {es-pull}94835[#94835] (issue: {es-issue}93123[#93123]) +* Retry downsample ILM action using a new target index {es-pull}94965[#94965] (issue: {es-issue}93580[#93580]) +* Strip disallowed chars from generated snapshot name {es-pull}95767[#95767] (issue: {es-issue}95593[#95593]) +* [ILM] Fix the migrate 
to tiers service and migrate action tiers configuration {es-pull}95934[#95934] Infra/Core:: -* Check no unassigned shards even if the node already left {es-pull}94722[#94722] * Fix race condition in `NodeEnvironment.close()` {es-pull}94677[#94677] (issue: {es-issue}94672[#94672]) * Use double wildcards for filtered excludes properly {es-pull}94195[#94195] (issue: {es-issue}92632[#92632]) Infra/REST API:: * Add level parameter validation in REST layer {es-pull}94136[#94136] (issue: {es-issue}93981[#93981]) -* Fixes CORS headers needed by Elastic clients {es-pull}85791[#85791] Infra/Scripting:: * Allow low level paging in `LeafDocLookup` {es-pull}93711[#93711] +* Revert usage of `SafeMustacheFactory` in `CustomMustacheFactory` {es-pull}95557[#95557] Ingest Node:: -* Fix async enrich execution prematurely releases enrich policy lock {es-pull}94702[#94702] (issue: {es-issue}94690[#94690]) +* Fix `Grok.match()` with offset and suffix pattern {es-pull}95003[#95003] (issue: {es-issue}95002[#95002]) +* Fix bug in verbose simulations of the ingest pipeline API {es-pull}95232[#95232] + +Machine Learning:: +* Avoid expensive source parsing by using doc values when querying model definition meta fields {es-pull}95590[#95590] -Recovery:: -* Async creation of `IndexShard` instances {es-pull}94545[#94545] +Mapping:: +* Longer timeout for mapping update during resize {es-pull}95221[#95221] + +Network:: +* Fix `RecyclerBytesStreamOutput` corrupting when ending write on page boundary {es-pull}95114[#95114] +* Fix maximum seek limit `RecyclerBytesStreamOutput` {es-pull}95133[#95133] + +Ranking:: +* Fix versioning for tests cases using a randomly generated rank builder {es-pull}95514[#95514] Search:: -* Fix '_terms_enum' on docvalue-only keywords fields {es-pull}94719[#94719] (issue: {es-issue}94673[#94673]) * Fix `_terms_enum` display values {es-pull}94080[#94080] (issue: {es-issue}94041[#94041]) -* Return 200 when closing empty PIT or scroll {es-pull}94708[#94708] * Support ignore malformed in boolean fields {es-pull}93239[#93239] (issue: {es-issue}89542[#89542]) * Support search template api explain query string argument {es-pull}94832[#94832] (issue: {es-issue}83363[#83363]) Snapshot/Restore:: +* Cancel cold cache prewarming tasks if store is closing {es-pull}95891[#95891] (issue: {es-issue}95504[#95504]) +* Fix 0 default value for repo snapshot speed {es-pull}95854[#95854] (issue: {es-issue}95561[#95561]) +* Fix Azure `InputStream#read` method {es-pull}96034[#96034] * Stop sorting indices in get-snapshots API {es-pull}94890[#94890] -Stats:: -* Fix _cluster/stats `.nodes.fs` deduplication {es-pull}94798[#94798] (issue: {es-issue}24472[#24472]) -* Fix `FsInfo` device deduplication {es-pull}94744[#94744] - Transform:: -* Catch deprecations as `Exception` rather than `IOException` {es-pull}94553[#94553] +* Call listener in order to prevent the request from hanging {es-pull}96221[#96221] +* Do not fail upon `ResourceAlreadyExistsException` during destination index creation {es-pull}96274[#96274] (issue: {es-issue}95310[#95310]) +* Fix privileges check failures by adding `allow_restricted_indices` flag {es-pull}95187[#95187] * Secondary credentials used with transforms should only require source and destination index privileges, not transform privileges {es-pull}94420[#94420] +* Use monotonic time in `TransformScheduler` {es-pull}95456[#95456] (issue: {es-issue}95445[#95445]) [[deprecation-8.8.0]] [float] @@ -85,18 +113,32 @@ Allocation:: === Enhancements Aggregations:: -* Don't create many Rounding.Prepared 
instances when checking for empty buckets in date_histogram aggregator. {es-pull}94649[#94649] +* Add `keyed` parameter to filters agg, allowing the user to get non-keyed buckets of named filters agg {es-pull}89256[#89256] (issue: {es-issue}83957[#83957]) +* Add global ordinal info to stats APIs {es-pull}94500[#94500] +* Don't create many `Rounding.Prepared` instances when checking for empty buckets in date_histogram aggregator. {es-pull}94649[#94649] Analysis:: * Add origin of synonym rules to exception message {es-pull}93702[#93702] +Application:: +* Behavioral Analytics event ingest tuning {es-pull}95405[#95405] +* [Behavioral Analytics] Add geo ip and user agent to events {es-pull}95433[#95433] +* [Behavioral analytics] Implement search filters into events {es-pull}95212[#95212] + Authentication:: * Do not fail node if SAML HTTP metadata is unavailable {es-pull}92810[#92810] (issue: {es-issue}37608[#37608]) * Finer control over authentication metadata serialization {es-pull}93726[#93726] +Authorization:: +* Add permissions to `kibana_system` for TI package transforms to support IOC expiration {es-pull}94506[#94506] (issue: {es-issue}94505[#94505]) +* Ensure checking indices privileges works with `nested-limited-role` {es-pull}95170[#95170] + Cluster Coordination:: * Improve master service batching queues {es-pull}92021[#92021] (issue: {es-issue}81626[#81626]) +DLM:: +* Adding origination date to DLM {es-pull}95113[#95113] + Engine:: * Increase the merge factor to 32 for time-based data {es-pull}94134[#94134] * Reduce the likelihood of writing small segments due to an oversize translog {es-pull}93524[#93524] (issue: {es-issue}75611[#75611]) @@ -108,6 +150,9 @@ Geo:: * Allow docvalues-only search on `geo_shape` {es-pull}94396[#94396] * Support for store parameter in `geo_shape` field {es-pull}94418[#94418] (issue: {es-issue}83655[#83655]) +Highlighting:: +* Use `storedFieldsSpec` to load stored fields for highlighting {es-pull}91841[#91841] + ILM+SLM:: * Implicitly rollover data streams / aliases based on `max_primary_shard_docs` {es-pull}94065[#94065] (issue: {es-issue}87246[#87246]) * Sort ILM explain output by natural index name {es-pull}94879[#94879] (issue: {es-issue}94768[#94768]) @@ -126,10 +171,17 @@ Infra/Transport API:: * Add `transport_version` to node info JSON {es-pull}94669[#94669] Ingest Node:: -* Introduce redirect method on IngestDocument {es-pull}94000[#94000] (issue: {es-issue}83653[#83653]) +* Add `reroute` processor {es-pull}76511[#76511] +* Introduce redirect method on `IngestDocument` {es-pull}94000[#94000] (issue: {es-issue}83653[#83653]) +* [Ingest Processor] Add `ignore_missing` param to the `uri_parts` ingest processor {es-pull}95068[#95068] Machine Learning:: * Add `_meta` field to data frame analytics config {es-pull}94529[#94529] +* Add `embedding_size` to text embedding config {es-pull}95176[#95176] +* Include model definition install status for Pytorch models {es-pull}95271[#95271] +* Integrate ELSER model download into put trained model API {es-pull}95281[#95281] +* Start, stop and infer of a trained model can now optionally use a deployment ID that is different to the model ID {es-pull}95168[#95168] +* [ML] Get trained model stats by deployment id or model id {es-pull}95440[#95440] Mapping:: * Cut over from Field to `StringField` when applicable {es-pull}94540[#94540] @@ -138,74 +190,121 @@ Mapping:: * Index sequence numbers via a single Lucene field {es-pull}94504[#94504] * Use a combined field to index terms and doc values on keyword fields 
{es-pull}93579[#93579] +Monitoring:: +* Add `event_loop_utilization` Kibana stats to the monitoring index templates {es-pull}95388[#95388] + Network:: * Add request/response body logging to HTTP tracer {es-pull}93133[#93133] * Avoid deserializing responses in proxy node {es-pull}93799[#93799] * Report transport message size per action {es-pull}94543[#94543] (issue: {es-issue}88151[#88151]) +* Retain underlying error on proxy mode connection failure {es-pull}94998[#94998] + +SQL:: +* Add `WildcardLike/Pattern` to QL {es-pull}95357[#95357] Search:: * Adding initial public and internal serverless scopes to Search team REST handlers {es-pull}94035[#94035] -* Enable _terms_enum on version fields {es-pull}93839[#93839] (issue: {es-issue}83403[#83403]) -* Leverage Weight#count when size is set to 0 {es-pull}94858[#94858] +* Enable `_terms_enum` on version fields {es-pull}93839[#93839] (issue: {es-issue}83403[#83403]) +* Introduce `DocumentParsingException` {es-pull}92646[#92646] (issue: {es-issue}85083[#85083]) +* Leverage `Weight#count` when size is set to 0 {es-pull}94858[#94858] +* Make `SourceProvider` using stored fields segment-thread-safe {es-pull}95082[#95082] * Shortcut total hit count when `terminate_after` is used {es-pull}94889[#94889] * [Profiling] Map stack frames more efficiently {es-pull}94327[#94327] * [Profiling] Parallelize response handling {es-pull}93960[#93960] Security:: -* Fleet: Add new mappings for .fleet-actions signing {es-pull}93802[#93802] +* Fleet: Add new mappings for `.fleet-actions` signing {es-pull}93802[#93802] Snapshot/Restore:: -* Add cluster stats re. snapshot activity {es-pull}93680[#93680] * Add register analysis to repo analysis API {es-pull}93955[#93955] +* Add snapshot activity in cluster stats {es-pull}93680[#93680] * Add support for custom endpoints in the Azure repository {es-pull}94576[#94576] (issue: {es-issue}94537[#94537]) +* Failed tasks proactively cancel children tasks {es-pull}92588[#92588] (issue: {es-issue}90353[#90353]) TSDB:: * Support position `time_series_metric` on `geo_point` fields {es-pull}93946[#93946] Transform:: * Add `delete_destination_index` parameter to the `Delete Transform API` {es-pull}94162[#94162] +* Allow specifying destination index aliases in the Transform's `dest` config {es-pull}94943[#94943] +* Expose authorization failure as transform health issue {es-pull}94724[#94724] + +Vector Search:: +* Increase max number of vector dims to 2048 {es-pull}95257[#95257] Watcher:: +* Add Watcher APIs for updating/retrieving settings {es-pull}95342[#95342] (issue: {es-issue}92991[#92991]) * Porting watcher over to `BulkProcessor2` {es-pull}94133[#94133] [[feature-8.8.0]] [float] === New features +Application:: +* Initial Search Application Search API with templates {es-pull}95026[#95026] +* [Behavioral Analytics] Add a `final_pipeline` to event data streams {es-pull}95198[#95198] + +Authentication:: +* GA release of the JWT realm {es-pull}95398[#95398] + CRUD:: * New `TransportBroadcastUnpromotableAction` action {es-pull}93600[#93600] DLM:: +* Add new endpoints to configure data lifecycle on a data stream level {es-pull}94590[#94590] +* Dlm add auto rollover condition max age {es-pull}94950[#94950] * Initial implementation for `DataLifecycleService` {es-pull}94012[#94012] * Introduce a _lifecycle/explain API for data stream backing indices {es-pull}94621[#94621] +* Introduce the `index.lifecycle.prefer_ilm` setting {es-pull}95423[#95423] +* [DLM] Extend the template to simulate api to support include defaults 
{es-pull}94861[#94861] * [DLM] Introduce default rollover cluster setting & expose it via APIs {es-pull}94240[#94240] Health:: * Add new `ShardsCapacity` Health Indicator Service {es-pull}94552[#94552] * Add to `HealthMetadata` information about `ShardLimits` {es-pull}94116[#94116] +Ingest Node:: +* Add license checking to the redact processor {es-pull}95477[#95477] + Machine Learning:: * Text Expansion Query {es-pull}93694[#93694] +Ranking:: +* Add support for Reciprocal Rank Fusion to the search API {es-pull}93396[#93396] + Search:: +* Add Enterprise Search Module {es-pull}94381[#94381] * Add new `similarity` field to `knn` clause in `_search` {es-pull}94828[#94828] * Add the ability to return the score of the named queries {es-pull}94564[#94564] (issue: {es-issue}29606[#29606]) +* Implements behavioral analytics events ingest API {es-pull}95027[#95027] + +TSDB:: +* Encode using 40, 48 and 56 bits per value {es-pull}93371[#93371] +* Flattened field synthetic support {es-pull}94842[#94842] +* Support flattened fields as time series dimension fields {es-pull}95273[#95273] [[upgrade-8.8.0]] [float] === Upgrades +Engine:: +* Upgrade to `lucene-9.6-snapshot-dcc2154a1d3` {es-pull}94955[#94955] + +Infra/Core:: +* Upgrade Jackson xml to 2.15.0 {es-pull}95641[#95641] + Ingest Node:: -* Upgrading tika to 2.7.0 {es-pull}93759[#93759] +* Upgrading tika to `2.7.0` {es-pull}93759[#93759] Network:: -* Upgrade to Netty 4.1.89 {es-pull}94179[#94179] +* Upgrade to Netty `4.1.89` {es-pull}94179[#94179] Packaging:: -* Upgrade bundled JDK to Java 20 {es-pull}94600[#94600] +* Bump bundled JDK to Java `20.0.1` {es-pull}95359[#95359] Search:: -* Upgrade to lucene-9.6.0-snapshot-8a815153fbe {es-pull}94635[#94635] -* Upgrade to lucene-9.6.0-snapshot-f5d1e1c787c {es-pull}94494[#94494] +* Upgrade Lucene to the final 9.6.0 release {es-pull}95967[#95967] +* Upgrade to `lucene-9.6.0-snapshot-8a815153fbe` {es-pull}94635[#94635] +* Upgrade to `lucene-9.6.0-snapshot-f5d1e1c787c` {es-pull}94494[#94494] diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index 82ce4d4a7d471..7b6bfdb4aa3cf 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -328,6 +328,19 @@ GET /_xpack/usage "data_streams" : 0, "indices_count" : 0 }, + "data_lifecycle" : { + "available": true, + "enabled": true, + "lifecycle": { + "count": 0, + "default_rollover_used": true, + "retention": { + "minimum_millis": 0, + "maximum_millis": 0, + "average_millis": 0.0 + } + } + }, "data_tiers" : { "available" : true, "enabled" : true, @@ -413,6 +426,9 @@ GET /_xpack/usage "enabled": true, "search_applications" : { "count": 0 + }, + "analytics_collections": { + "count": 0 } } } diff --git a/docs/reference/transform/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc index a4b633994b3eb..670e015c33149 100644 --- a/docs/reference/transform/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -41,7 +41,7 @@ comma-separated list or a wildcard expression. 
To stop all {transforms}, use `allow_no_match`:: (Optional, Boolean) - include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-match-transforms2] +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-match-transforms2] `force`:: (Optional, Boolean) Set to `true` to stop a failed {transform} or to
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 234affb4e49fb..867b636bc8059 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2483,124 +2483,124 @@ [dependency checksum entries updated; tag contents omitted]
diff --git a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java index 42073489faf8d..04cd4375a42be 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java @@ -8,17 +8,32 @@ package org.elasticsearch.core; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; /** * A basic {@link RefCounted} implementation that is initialized with a ref count of 1 and calls {@link #closeInternal()} once it reaches * a 0 ref count. */ public abstract class AbstractRefCounted implements RefCounted { + public static final String ALREADY_CLOSED_MESSAGE = "already closed, can't increment ref count"; - private final AtomicInteger refCount = new AtomicInteger(1); + private static final VarHandle VH_REFCOUNT_FIELD; + + static { + try { + VH_REFCOUNT_FIELD = MethodHandles.lookup() + .in(AbstractRefCounted.class) + .findVarHandle(AbstractRefCounted.class, "refCount", int.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + @SuppressWarnings("FieldMayBeFinal") // updated via VH_REFCOUNT_FIELD (and _only_ via VH_REFCOUNT_FIELD) + private volatile int refCount = 1; protected AbstractRefCounted() {} @@ -32,9 +47,9 @@ public final void incRef() { @Override public final boolean tryIncRef() { do { - int i = refCount.get(); + int i = refCount; if (i > 0) { - if (refCount.compareAndSet(i, i + 1)) { + if (VH_REFCOUNT_FIELD.weakCompareAndSet(this, i, i + 1)) { touch(); return true; } @@ -47,9 +62,9 @@ public final boolean tryIncRef() { @Override public final boolean decRef() { touch(); - int i = refCount.decrementAndGet(); - assert i >= 0 : "invalid decRef call: already closed"; - if (i == 0) { + int i = (int) VH_REFCOUNT_FIELD.getAndAdd(this, -1); + assert i > 0 : "invalid decRef call: already closed"; + if (i == 1) { try { closeInternal(); } catch (Exception e) { @@ -63,7 +78,7 @@ public final boolean decRef() { @Override public final boolean hasReferences() { - return refCount.get() > 0; + return refCount > 0; } /** @@ -73,7 +88,7 @@ public final boolean hasReferences() { protected void touch() {} protected void alreadyClosed() { - final int currentRefCount = refCount.get(); + final int currentRefCount = refCount; assert currentRefCount == 0 : currentRefCount; throw new IllegalStateException(ALREADY_CLOSED_MESSAGE); } @@ -82,7 +97,7 @@ protected void alreadyClosed() { * Returns the current reference count. */ public final int refCount() { - return this.refCount.get(); + return refCount; } /**
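The contract preserved by the `VarHandle` rewrite above is easiest to see in use: the count starts at 1, `closeInternal()` runs exactly once when the count returns to 0, and a closed object cannot be revived. A minimal usage sketch (the `PooledBuffer` subclass is hypothetical, not part of this change):

[source,java]
----
import org.elasticsearch.core.AbstractRefCounted;

public class RefCountedExample {
    // Hypothetical resource holder; closeInternal() is invoked exactly once.
    static final class PooledBuffer extends AbstractRefCounted {
        @Override
        protected void closeInternal() {
            System.out.println("released");
        }
    }

    public static void main(String[] args) {
        PooledBuffer buffer = new PooledBuffer(); // implicit first reference: refCount == 1
        buffer.incRef();                          // refCount == 2
        buffer.decRef();                          // refCount == 1, still open
        boolean closed = buffer.decRef();         // refCount == 0, closeInternal() runs
        assert closed;
        assert buffer.hasReferences() == false;
        assert buffer.tryIncRef() == false;       // once closed, the count stays at 0
    }
}
----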
diff --git a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java index f5ecb0ab9c09b..20b5b158a70a7 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java +++ b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java @@ -122,21 +122,7 @@ public String toString() { * Wraps a {@link Releasable} such that its {@link Releasable#close()} method can be called multiple times without double-releasing. */ public static Releasable releaseOnce(final Releasable releasable) { - final var ref = new AtomicReference<>(releasable); - return new Releasable() { - @Override - public void close() { - final var acquired = ref.getAndSet(null); - if (acquired != null) { - acquired.close(); - } - } - - @Override - public String toString() { - return "releaseOnce[" + ref.get() + "]"; - } - }; + return new ReleaseOnce(releasable); } public static Releasable assertOnce(final Releasable delegate) { @@ -165,4 +151,23 @@ public String toString() { return delegate; } } + + private static class ReleaseOnce extends AtomicReference implements Releasable { + ReleaseOnce(Releasable releasable) { + super(releasable); + } + + @Override + public void close() { + final var acquired = getAndSet(null); + if (acquired != null) { + acquired.close(); + } + } + + @Override + public String toString() { + return "releaseOnce[" + get() + "]"; + } + } }
diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java index ecc82f293cb4c..3e0c4d517e935 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java @@ -27,7 +27,7 @@ public class ParsedMediaType { private final String subType; private final Map parameters; // tchar pattern as defined by RFC7230 section 3.2.6 - private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+"); + private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+"); private ParsedMediaType(String originalHeaderValue, String type, String subType, Map parameters) { this.originalHeaderValue = originalHeaderValue;
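The one-character fix to `TCHAR_PATTERN` above deserves a note: inside a character class, `A-z` spans ASCII 0x41-0x7A and therefore also matches the six punctuation characters [ \ ] ^ _ ` that sit between Z and a. A small standalone sketch of the difference (illustrative only; it reuses the two pattern literals from the diff verbatim):

[source,java]
----
import java.util.regex.Pattern;

public class TcharRangeExample {
    public static void main(String[] args) {
        Pattern buggy = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+"); // old pattern
        Pattern fixed = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+"); // new pattern
        // '[' sits between 'Z' (0x5A) and 'a' (0x61), so the A-z range accepts it.
        System.out.println(buggy.matcher("b[").matches()); // true  -- why "a/b[" used to parse
        System.out.println(fixed.matcher("b[").matches()); // false -- now rejected, as the new test asserts
    }
}
----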
diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ParsedMediaTypeTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ParsedMediaTypeTests.java index b7e3d640b4d7c..eb502c15e26aa 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ParsedMediaTypeTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ParsedMediaTypeTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -108,6 +109,14 @@ public void testEmptyParams() { assertEquals(Collections.emptyMap(), parsedMediaType.getParameters()); } + public void testMalformedMediaType() { + List headers = List.of("a/b[", "a/b]", "a/b\\"); + for (String header : headers) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ParsedMediaType.parseMediaType(header)); + assertThat(e.getMessage(), equalTo("invalid media-type [" + header + "]")); + } + } + public void testMalformedParameters() { String mediaType = "application/foo"; IllegalArgumentException exception = expectThrows(
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 84b57c19c68d0..e948ee6718b43 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -141,10 +140,6 @@ protected void shardOperation( ActionListener.completeWith(listener, () -> { IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); - // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet - if (indexShard.routingEntry() == null) { - throw new ShardNotFoundException(indexShard.shardId()); - } StoreStats storeStats = indexShard.storeStats(); IndexAbstraction indexAbstraction = clusterService.state().getMetadata().getIndicesLookup().get(shardRouting.getIndexName()); assert indexAbstraction != null;
diff --git a/modules/dlm/build.gradle b/modules/dlm/build.gradle index 20d738eabc03a..3ce0737b202f6 100644 --- a/modules/dlm/build.gradle +++ b/modules/dlm/build.gradle @@ -3,6 +3,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' @@ -23,8 +24,6 @@ dependencies { testImplementation project(':modules:data-streams') } -addQaCheckDependencies(project) - testClusters.configureEach { module ':modules:reindex' testDistribution = 'DEFAULT' @@ -37,6 +36,10 @@ testClusters.configureEach { requiresFeature 'es.dlm_feature_flag_enabled', Version.fromString("8.8.0") } +tasks.named('javaRestTest') { + usesDefaultDistribution() +} + if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.dlm_feature_flag_enabled', 'true' @@ -47,4 +50,7 @@ if (BuildParams.isSnapshotBuild() == false) { tasks.named("yamlRestTest").configure { systemProperty 'es.dlm_feature_flag_enabled', 'true' } + tasks.named("javaRestTest").configure { + systemProperty 'es.dlm_feature_flag_enabled', 'true' + } }
diff --git a/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/DataLifecycleServiceIT.java b/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/DataLifecycleServiceIT.java index d37aed794aabc..5b9632bf69277 100644 --- a/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/DataLifecycleServiceIT.java +++ b/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/DataLifecycleServiceIT.java @@ -10,13 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.IndicesRequest; import
org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; @@ -32,6 +30,7 @@ import org.elasticsearch.cluster.metadata.DataStreamAction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -40,23 +39,23 @@ import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.dlm.action.PutDataLifecycleAction; import org.elasticsearch.index.Index; -import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentType; import org.junit.After; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; -import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; @@ -67,7 +66,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; @@ -261,15 +259,15 @@ public void testUpdatingLifecycleAppliesToAllBackingIndices() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96084") public void testAutomaticForceMerge() throws Exception { /* * This test makes sure that (1) DLM does _not_ call forcemerge on an index in the same DLM pass when it rolls over the index and * that (2) it _does_ call forcemerge on an index that was rolled over in a previous DLM pass. * It's harder than you would think to detect through the REST API that forcemerge has been called. The reason is that segment * merging happens automatically during indexing, and when forcemerge is called it likely does nothing because all necessary - * merging has already happened automatically. In order to force forcemerge to merge segments, we change - "index.merge.policy.merge_factor" on the index to a value lower than the default.
If the number of segments goes down, that is - * proof that DLM called forcemerge. + * merging has already happened automatically. So in order to detect whether forcemerge has been called, we use a + * SendRequestBehavior in the MockTransportService to detect it. */ DataLifecycle lifecycle = new DataLifecycle(); disableDLM(); @@ -282,6 +280,33 @@ public void testAutomaticForceMerge() throws Exception { null, lifecycle ); + // This is the set of all indices against which a ForceMergeAction has been run: + final Set forceMergedIndices = new HashSet<>(); + { + // This creates a SendRequestBehavior to add the name of any index that has been forcemerged to the forceMergedIndices set. + final StubbableTransport.SendRequestBehavior sendBehavior = (connection, requestId, action, request, options) -> { + if (action.startsWith(ForceMergeAction.NAME)) { + String index = ((IndicesRequest) request).indices()[0]; + forceMergedIndices.add(index); + logger.info("Force merging {}", index); + } + connection.sendRequest(requestId, action, request, options); + }; + String masterNode = internalCluster().getMasterName(); + final MockTransportService targetTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + masterNode + ); + + for (DiscoveryNode node : internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()) + .state() + .getNodes()) { + if (node.canContainData() && node.getName().equals(masterNode) == false) { + final TransportService sourceTransportService = internalCluster().getInstance(TransportService.class, node.getName()); + targetTransportService.addSendBehavior(sourceTransportService, sendBehavior); + } + } + } CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); @@ -297,27 +322,11 @@ public void testAutomaticForceMerge() throws Exception { } final String toBeForceMergedIndex; - final int preDlmSegmentsForceMergedIndex; - if (currentGeneration == 1) { toBeForceMergedIndex = null; // Not going to be used - preDlmSegmentsForceMergedIndex = -1; // Not going to be used } else { toBeForceMergedIndex = DataStream.getDefaultBackingIndexName(dataStreamName, currentGeneration - 1); - preDlmSegmentsForceMergedIndex = getSegmentCount(toBeForceMergedIndex); - logger.info("preDlmSegmentsForceMergedIndex: {}", preDlmSegmentsForceMergedIndex); } - final int preDlmSegmentsAboutToBeRolledOverIndex = getSegmentCount(toBeRolledOverIndex); - logger.info("preDlmSegmentsAboutToBeRolledOverIndex: {}", preDlmSegmentsAboutToBeRolledOverIndex); - /* - * Without the following, calls to forcemerge are essentially a no-op since it has already done automatic merging. Setting - * merge_factor on its own does not do anything, but it results in calls to forcemerge making observable changes to the - * number of segments. So we're doing this just so that we can check that DLM did actually call forcemerge. 
- */ - updateIndexSettings( - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), 5), - toBeRolledOverIndex - ); int currentBackingIndexCount = currentGeneration; DataLifecycleService dataLifecycleService = internalCluster().getInstance( DataLifecycleService.class, @@ -337,25 +346,22 @@ public void testAutomaticForceMerge() throws Exception { assertThat(backingIndices.size(), equalTo(currentBackingIndexCount + 1)); String writeIndex = dataStream.getWriteIndex().getName(); assertThat(writeIndex, backingIndexEqualTo(dataStreamName, currentBackingIndexCount + 1)); - int postDlmSegmentsNewlyRolledOverIndex = getSegmentCount(toBeRolledOverIndex); /* * We only expect forcemerge to happen on the 2nd DLM run and later, since on the first there's only the single write * index to be rolled over. */ if (currentBackingIndexCount > 1) { - int postDlmSegmentsForceMergedIndex = getSegmentCount(toBeForceMergedIndex); assertThat( "The segments for " + toBeForceMergedIndex + " were not merged", - postDlmSegmentsForceMergedIndex, - lessThan(preDlmSegmentsForceMergedIndex) + forceMergedIndices.contains(toBeForceMergedIndex), + equalTo(true) ); - logger.info("postDlmSegmentsForceMergedIndex: {}", postDlmSegmentsForceMergedIndex); } // We want to assert that when DLM rolls over the write index, it doesn't forcemerge it on that iteration: assertThat( "The segments for " + toBeRolledOverIndex + " were unexpectedly merged", - postDlmSegmentsNewlyRolledOverIndex, - equalTo(preDlmSegmentsAboutToBeRolledOverIndex) + forceMergedIndices.contains(toBeRolledOverIndex), + equalTo(false) ); }); } @@ -365,22 +371,6 @@ private static void disableDLM() { updateClusterSettings(Settings.builder().put(DataLifecycleService.DLM_POLL_INTERVAL, TimeValue.MAX_VALUE)); } - private int getSegmentCount(String indexName) throws ExecutionException, InterruptedException { - IndicesSegmentResponse segmentResponse = client().admin().indices().segments(new IndicesSegmentsRequest(indexName)).get(); - return (int) segmentResponse.getIndices() - .get(indexName) - .getShards() - .values() - .stream() - .map(IndexShardSegments::shards) - .flatMap(Arrays::stream) - .filter(shard -> shard.getShardRouting().primary()) - .map(ShardSegments::getSegments) - .flatMap(List::stream) - .filter(segment -> segment.search) // in case there hasn't been a flush - .count(); - } - public void testErrorRecordingOnRollover() throws Exception { // empty lifecycle contains the default rollover DataLifecycle lifecycle = new DataLifecycle(); diff --git a/modules/dlm/src/javaRestTest/java/org/elasticsearch/dlm/DlmPermissionsRestIT.java b/modules/dlm/src/javaRestTest/java/org/elasticsearch/dlm/DlmPermissionsRestIT.java new file mode 100644 index 0000000000000..c896ff8341580 --- /dev/null +++ b/modules/dlm/src/javaRestTest/java/org/elasticsearch/dlm/DlmPermissionsRestIT.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.dlm; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; +import org.junit.BeforeClass; +import org.junit.ClassRule; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class DlmPermissionsRestIT extends ESRestTestCase { + + private static final String PASSWORD = "secret-test-password"; + private static Path caPath; + + @BeforeClass + public static void init() throws URISyntaxException, FileNotFoundException { + URL resource = DlmPermissionsRestIT.class.getResource("/ssl/ca.crt"); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource /ssl/ca.crt"); + } + caPath = PathUtils.get(resource.toURI()); + } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .feature(FeatureFlag.DLM_ENABLED) + .distribution(DistributionType.DEFAULT) + .setting("xpack.watcher.enabled", "false") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.enabled", "true") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.http.ssl.enabled", "true") + .setting("xpack.security.http.ssl.certificate", "node.crt") + .setting("xpack.security.http.ssl.key", "node.key") + .setting("xpack.security.http.ssl.certificate_authorities", "ca.crt") + .setting("xpack.security.transport.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.certificate", "node.crt") + .setting("xpack.security.transport.ssl.key", "node.key") + .setting("xpack.security.transport.ssl.certificate_authorities", "ca.crt") + .setting("xpack.security.transport.ssl.verification_mode", "certificate") + .keystore("xpack.security.transport.ssl.secure_key_passphrase", "node-password") + .keystore("xpack.security.http.ssl.secure_key_passphrase", "node-password") + .keystore("bootstrap.password", PASSWORD) + .configFile("node.key", Resource.fromClasspath("ssl/node.key")) + .configFile("node.crt", Resource.fromClasspath("ssl/node.crt")) + .configFile("ca.crt", Resource.fromClasspath("ssl/ca.crt")) + .user("test_admin", PASSWORD, "superuser") + .user("test_dlm", PASSWORD, "manage_dlm") + .user("test_non_privileged", PASSWORD, "not_privileged") + .rolesFile(Resource.fromClasspath("roles.yml")) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected Settings restClientSettings() { + // Note: This user is assigned the role "manage_dlm". That role is defined in roles.yml. 
+        String token = basicAuthHeaderValue("test_dlm", new SecureString(PASSWORD.toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).put(CERTIFICATE_AUTHORITIES, caPath).build();
+    }
+
+    @Override
+    protected Settings restAdminSettings() {
+        String token = basicAuthHeaderValue("test_admin", new SecureString(PASSWORD.toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).put(CERTIFICATE_AUTHORITIES, caPath).build();
+    }
+
+    private Settings restUnprivilegedClientSettings() {
+        // Note: This user is assigned the role "not_privileged". That role is defined in roles.yml.
+        String token = basicAuthHeaderValue("test_non_privileged", new SecureString(PASSWORD.toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).put(CERTIFICATE_AUTHORITIES, caPath).build();
+    }
+
+    @Override
+    protected String getProtocol() {
+        // Because http.ssl.enabled = true
+        return "https";
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testManageDLM() throws Exception {
+        {
+            /*
+             * This test checks that a user with the "manage_dlm" index privilege on "dlm-*" data streams can delete and put a lifecycle
+             * on the "dlm-test" data stream, while a user who does not have that privilege (but does have all of the other
+             * "dlm-*" privileges) cannot delete or put a lifecycle on that data stream.
+             */
+            String dataStreamName = "dlm-test"; // Needs to match the pattern of the names in roles.yml
+            createDataStreamAsAdmin(dataStreamName);
+            Response getDataStreamResponse = adminClient().performRequest(new Request("GET", "/_data_stream/" + dataStreamName));
+            final List<Map<String, Object>> nodes = ObjectPath.createFromResponse(getDataStreamResponse).evaluate("data_streams");
+            String index = (String) ((List<Map<String, Object>>) nodes.get(0).get("indices")).get(0).get("index_name");
+
+            Request explainLifecycleRequest = new Request("GET", "/" + randomFrom("_all", "*", index) + "/_lifecycle/explain");
+            Request getLifecycleRequest = new Request("GET", "_data_stream/" + randomFrom("_all", "*", dataStreamName) + "/_lifecycle");
+            Request deleteLifecycleRequest = new Request(
+                "DELETE",
+                "_data_stream/" + randomFrom("_all", "*", dataStreamName) + "/_lifecycle"
+            );
+            Request putLifecycleRequest = new Request("PUT", "_data_stream/" + randomFrom("_all", "*", dataStreamName) + "/_lifecycle");
+            putLifecycleRequest.setJsonEntity("{}");
+
+            makeRequest(client(), explainLifecycleRequest, true);
+            makeRequest(client(), getLifecycleRequest, true);
+            makeRequest(client(), deleteLifecycleRequest, true);
+            makeRequest(client(), putLifecycleRequest, true);
+
+            try (
+                RestClient nonDlmManagerClient = buildClient(restUnprivilegedClientSettings(), getClusterHosts().toArray(new HttpHost[0]))
+            ) {
+                makeRequest(nonDlmManagerClient, explainLifecycleRequest, true);
+                makeRequest(nonDlmManagerClient, getLifecycleRequest, true);
+                makeRequest(nonDlmManagerClient, deleteLifecycleRequest, false);
+                makeRequest(nonDlmManagerClient, putLifecycleRequest, false);
+            }
+        }
+        {
+            // Now test that the user who has the manage_dlm privilege on dlm-* data streams cannot manage other data streams:
+            String otherDataStreamName = "other-dlm-test";
+            createDataStreamAsAdmin(otherDataStreamName);
+            Response getOtherDataStreamResponse = adminClient().performRequest(new Request("GET", "/_data_stream/" + otherDataStreamName));
+            final List<Map<String, Object>> otherNodes = ObjectPath.createFromResponse(getOtherDataStreamResponse).evaluate("data_streams");
+            String otherIndex = (String) ((List<Map<String, Object>>) otherNodes.get(0).get("indices")).get(0).get("index_name");
+            Request putOtherLifecycleRequest = new Request("PUT", "_data_stream/" + otherDataStreamName + "/_lifecycle");
+            putOtherLifecycleRequest.setJsonEntity("{}");
+            makeRequest(client(), new Request("GET", "/" + otherIndex + "/_lifecycle/explain"), false);
+            makeRequest(client(), new Request("GET", "_data_stream/" + otherDataStreamName + "/_lifecycle"), false);
+            makeRequest(client(), new Request("DELETE", "_data_stream/" + otherDataStreamName + "/_lifecycle"), false);
+            makeRequest(client(), putOtherLifecycleRequest, false);
+        }
+    }
+
+    /*
+     * This makes the given request with the given client. It asserts a 200 response if expectSuccess is true, and asserts an exception
+     * with a 403 response if expectSuccess is false.
+     */
+    private void makeRequest(RestClient client, Request request, boolean expectSuccess) throws IOException {
+        if (expectSuccess) {
+            Response response = client.performRequest(request);
+            assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
+        } else {
+            ResponseException exception = expectThrows(ResponseException.class, () -> client.performRequest(request));
+            assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.FORBIDDEN.getStatus()));
+        }
+    }
+
+    private void createDataStreamAsAdmin(String name) throws IOException {
+        String mappingsTemplateName = name + "_mappings";
+        Request mappingsRequest = new Request("PUT", "/_component_template/" + mappingsTemplateName);
+        mappingsRequest.setJsonEntity("""
+            {
+              "template": {
+                "mappings": {
+                  "properties": {
+                    "@timestamp": {
+                      "type": "date",
+                      "format": "date_optional_time||epoch_millis"
+                    },
+                    "message": {
+                      "type": "wildcard"
+                    }
+                  }
+                }
+              }
+            }""");
+        assertOK(adminClient().performRequest(mappingsRequest));
+
+        String settingsTemplateName = name + "_settings";
+        Request settingsRequest = new Request("PUT", "/_component_template/" + settingsTemplateName);
+        settingsRequest.setJsonEntity("""
+            {
+              "template": {
+                "settings": {
+                  "number_of_shards": 1,
+                  "number_of_replicas": 0
+                }
+              }
+            }""");
+        assertOK(adminClient().performRequest(settingsRequest));
+
+        Request indexTemplateRequest = new Request("PUT", "/_index_template/" + name + "_template");
+        indexTemplateRequest.setJsonEntity(Strings.format("""
+            {
+              "index_patterns": ["%s*"],
+              "data_stream": { },
+              "composed_of": [ "%s", "%s" ]
+            }""", name, mappingsTemplateName, settingsTemplateName));
+        assertOK(adminClient().performRequest(indexTemplateRequest));
+
+        Request request = new Request("PUT", "/_data_stream/" + name);
+        assertOK(adminClient().performRequest(request));
+    }
+}
diff --git a/modules/dlm/src/javaRestTest/resources/roles.yml b/modules/dlm/src/javaRestTest/resources/roles.yml
new file mode 100644
index 0000000000000..fc841695af367
--- /dev/null
+++ b/modules/dlm/src/javaRestTest/resources/roles.yml
@@ -0,0 +1,18 @@
+manage_dlm:
+  cluster:
+    - monitor
+  indices:
+    - names: [ 'dlm-*' ]
+      privileges:
+        - read
+        - write
+        - manage_dlm
+not_privileged:
+  cluster:
+    - monitor
+  indices:
+    - names: [ 'dlm-*' ]
+      privileges:
+        - read
+        - write
+        - view_index_metadata
diff --git a/modules/dlm/src/javaRestTest/resources/ssl/README.asciidoc b/modules/dlm/src/javaRestTest/resources/ssl/README.asciidoc
new file mode 100644
index 0000000000000..d91e5653cdef9
--- /dev/null
+++ b/modules/dlm/src/javaRestTest/resources/ssl/README.asciidoc
@@ -0,0 +1,37 @@
+= Keystore Details
+This document details the steps used to create the certificate and keystore files in this directory.
+
+== Instructions on generating certificates
+
+The certificates in this directory have been generated using elasticsearch-certutil (8.0.0 SNAPSHOT)
+
+=== Certificates for securing the HTTP server
+[source,shell]
+-----------------------------------------------------------------------------------------------------------
+elasticsearch-certutil ca --pem --out=${PWD}/ca.zip --pass="ca-password" --days=3500
+unzip ca.zip
+mv ca/ca.crt ./ca.crt
+mv ca/ca.key ./ca.key
+
+rm ca.zip
+rmdir ca
+-----------------------------------------------------------------------------------------------------------
+
+[source,shell]
+-----------------------------------------------------------------------------------------------------------
+elasticsearch-certutil cert --pem --name=node --out=${PWD}/node.zip --pass="node-password" --days=3500 \
+  --ca-cert=${PWD}/ca.crt --ca-key=${PWD}/ca.key --ca-pass="ca-password" \
+  --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \
+  --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1
+
+unzip node.zip
+mv node/node.* ./
+
+rm node.zip
+rmdir node
+-----------------------------------------------------------------------------------------------------------
+
+[source,shell]
+-----------------------------------------------------------------------------------------------------------
+keytool -importcert -file ca.crt -keystore ca.p12 -storetype PKCS12 -storepass "password" -alias ca
+-----------------------------------------------------------------------------------------------------------
diff --git a/modules/dlm/src/javaRestTest/resources/ssl/ca.crt b/modules/dlm/src/javaRestTest/resources/ssl/ca.crt
new file mode 100644
index 0000000000000..ccfdadcab6d14
--- /dev/null
+++ b/modules/dlm/src/javaRestTest/resources/ssl/ca.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDSTCCAjGgAwIBAgIUG4Vi/zqBSBJT7DgRTFDQwh4ShlQwDQYJKoZIhvcNAQEL
+BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l
+cmF0ZWQgQ0EwHhcNMjEwMzE4MDIyNjAyWhcNMzAxMDE3MDIyNjAyWjA0MTIwMAYD
+VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIfrBgvsv/i4v6bAtfZTCIBY
++OdhW6d2aF5LSPClruryqmp2vNWhGTEkcqe6EcFe+JRc+E+CnW0nXWslWf6kLxOJ
+VR5kjuT7LZ1tGbm70joh5V1t79NXu+BC0B/ET6T/BDzjnrDlt+AsFmR+F348UftY
+Y04NZRy+gRh9SxS0Y4riDGj0pWWJkPBK314JXf8rJe1RiYGfNl5OgAljGrs7sHAn
+1AO2nEH8Ihad3V55dtMIMXHGQTWkIx+QK25cGpySB78CXR432BmRMieMHZ5z1ELL
+A658Kco22HDmbNk4o51r/2AXs1fxcPTVZwK3n5tvC2hABXuILE7ck9A3LyGRZGMC
+AwEAAaNTMFEwHQYDVR0OBBYEFNlY6G4x4gG5/lRF8fO6knZaOzzlMB8GA1UdIwQY
+MBaAFNlY6G4x4gG5/lRF8fO6knZaOzzlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBAD4e1fOX00AT3bxXHyJd4tT6g40bxAmZhrtFDkoxX86Vp2bp
+h+XfUfr54ziVzIbVynYMJ759mdB4BN8oZNTbOpmz/hNbz5skd2wIdAw/oZqAsOiW
+l+OZLaaQYVfLesuBUJfxU7JvZeF0rB2F0ODc8BJz0Q6Mjbvj8fyCbSIQS01PjATN
+0zeFQYuwJaQgTLVTU9jQYIbNBgCUuVmOW6IDF6QULtbCuH1Wtyr3u2I2nWfpyDhF
+u7PY5Qh/O13rRy5o6NJofxaa3nU1PJalQzIA6ExA8ajol4ywiFtAyCVLYuJMKDt9
+HN0WWGAbhCPc/6i5KzNv6vW8EaWAOlAt2t1/7LU=
+-----END CERTIFICATE-----
diff --git a/modules/dlm/src/javaRestTest/resources/ssl/ca.key b/modules/dlm/src/javaRestTest/resources/ssl/ca.key
new file mode 100644
index 0000000000000..4438c4e59b247
--- /dev/null
+++ b/modules/dlm/src/javaRestTest/resources/ssl/ca.key
@@ -0,0 +1,30 @@
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-128-CBC,AD07A96A73827285800BF6F4C8C37988
+
+9F4L3SRxQaSkcmW72PaiPDDPNUW9zdoWy2VvSaKUp7cWCupUpF3gqvIwdpr/sHj5
+Jh4gfWCzASy2yb+Q/OAbeq2Nl5P7p6klDjBtDFVlLXmIearRiXBUgi7i55lic2nB +3zpUzBeXiqWxAiFTl1vhBB0DVexy0Ob7Hf3A7Zp669UQiMquplaGg+KtNVh2IxvJ +vZmV+danHJpTqd4CnC93J4l/4tH3/ZYHPydqe1a7Bhe0BwMurOqtoosuzF0BQMam +BcDVpyeRzg7C+ST1sZq+D/F1OpNvOOCE0hBjHg4NWdqyhiRLLwcbyEUutsyWo5zJ +QCnBiznVzeEobwFdglCLoe+fVFWVNe2fddX541kfcHRXozDvNbRMrkPwqWHzLLBc +bFn9PV3QSYoWE6Pee4/ibX4TYwe8yfxBBg5BpQQV+zjyBaXDQM6NNHMPxSE7YoD1 +TGAjQXwajse4uG0WRwOMgNHU9mzkMBLkv8s03PYmPXbnJkxd2jZSQoZ8FZrHQDXQ +oiMh6zMRDCiQRVrz7NwYN9uS5dwnj7fQDex5uyegIw7He57LuFJ92s7fqYAoaOtO +9QDRD5ky+q9+XN4T/3mOIaHTKNF5/kuN0eXH0vGVGWlNo2h+MBXGn+aA1p/97Cym +tZzmyAqDiXg9DhNMdHJor7DOQa9CCp5YxYYO5rzMa5ElvKIcOEmYkf1MTLq0Al/t +hYC5bL07aQ0sVhA+QW8kfxLkFT+u14rMlp6PJ9/KMLVBRoQWswwBMTBnocSwejkx +lZaGWjzpptQ3VqgSBOtEDjamItSFiZeN2ntwOckauVSRJZDig/q5yLgIlwrqxtDH +Sqh3u6JysIcBCcGg9U1q9AzxzFD8I4P8DwzUd56mbp3eR5iMvGsKcXbwlLvx/dSX +HVs0S7bEUr5WavmSIGwwrHtRO/l3STJNC1W7YxVKhBCxgz46DqADXbHuMvt8ZB13 +Zs94eEDA0vmPQnOilIG200V4OP3Uz8UP9HcNrhkLGuyCe+RIvv6NOwtq/O9YmazR +tmlcyrXEkvb5wubVg0zDlPpMBlYHGBEVl2vnVlNFHbsLkc55WahEbdpynnx3gYid +o4a5I/ywqaMou6ZTtOXZXc+0WuqjsLFAKmytZJtnktScGwJ+3JPWR51pi9j9q9W7 +oTnsyO4/a0nSZTNSGI2hxrmss5Y75bN/ydFuMhwd/GEiupKG40ZF+9hcGrqZRddM +uf0WoRvD5n611Bg8s9nwBMUjN7BFzu+a91s1W8LwwXUTZwkkyhkg/VUCKYbOH329 +Q6lZLb5nvvzEN/1HH/w0Bkl1jKBJSskw/R6zUGyviP1Sr3ZGkvUSvwXhrRHqI8MN +83t5AzZ6hivzy7rzCI/UsKoUx2/ef63TcvgLb/Vf85anuRR08Xcv/XIl775UvibQ +fAA0PE07sbYbO7vwRbv1bLhcPmA3wMsu0v/6Ohcv15uFFgUr/e9zhv5seP0tHdeR +ZKSbqlwfGRgp0smXPWJzIGG3g+lkadrfwTBuzgdjI8V/C+nEMk1eYy8SJd/CmfdG +IgZYMUWhc6GCcaq+eJ9VGVdgFkQU6aGTm4wNpmWPuDk/YDFo7ik48OrMvx67j1kz +-----END RSA PRIVATE KEY----- diff --git a/modules/dlm/src/javaRestTest/resources/ssl/ca.p12 b/modules/dlm/src/javaRestTest/resources/ssl/ca.p12 new file mode 100644 index 0000000000000..e79ddffd71981 Binary files /dev/null and b/modules/dlm/src/javaRestTest/resources/ssl/ca.p12 differ diff --git a/modules/dlm/src/javaRestTest/resources/ssl/node.crt b/modules/dlm/src/javaRestTest/resources/ssl/node.crt new file mode 100644 index 0000000000000..7b1bc7a5f5586 --- /dev/null +++ b/modules/dlm/src/javaRestTest/resources/ssl/node.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDszCCApugAwIBAgIVAO2bFGZI6jJKeo1hea8Yc+RvY1J7MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIxMDMxODAyMjYzMloXDTMwMTAxNzAyMjYzMlowDzENMAsG +A1UEAxMEbm9kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKQ7uDRl +d/wKlUkesl1jegzQcFw9po54Mt2O3UTOYBkFWg6amAIyA8Izkavkoh/kQZAR2gqa +O65jqO/rNRrNBlyX2x+IOm0XmDC1ZmHoOBIxaCZUGVqwkeHNxcb5TmVFbYAcRGOJ +b54v42SEarVoqJS9iQaGb7ScKTeQ7XWyPGImReVNwE7SJNWwuABTXMe9c6VtvZpY +xu1SX+gYVk7aWQ0p3ukHKJXrPfXYXSgozF3tKtFQvUrL1VjHEVWqWoBqjIbhl3X8 +eqkzxwC1y+8Zbp3Os9Y8PzHQ4etXG7UAPFRopy5MivlDxZ2u5DpVW/6Yy1B7i6Mp +9Leu2NPNZ7ul/iECAwEAAaOB4DCB3TAdBgNVHQ4EFgQUYVaPvntroOl+zfW5vDFg +Kvmmj1MwHwYDVR0jBBgwFoAU2VjobjHiAbn+VEXx87qSdlo7POUwgY8GA1UdEQSB +hzCBhIIJbG9jYWxob3N0ghdsb2NhbGhvc3Q2LmxvY2FsZG9tYWluNocEfwAAAYcQ +AAAAAAAAAAAAAAAAAAAAAYIKbG9jYWxob3N0NIIKbG9jYWxob3N0NoIVbG9jYWxo +b3N0LmxvY2FsZG9tYWlughdsb2NhbGhvc3Q0LmxvY2FsZG9tYWluNDAJBgNVHRME +AjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAdP/Z/tDOWkM5Eob+6FwIJuM9Pe9+NOwUL ++0qrHNHDt5ITyUf/C/l6yfXgbkvoLRa9QefN0cxy0ru8ew3nUUn7US0EfWF0yrza +M8BwznKKh6cs4AiFUdDliBgyqAzYubcZ6G6Trm3Tdh334bAQKM7M1TOvZa8jwXXb +6T1PUs/2RCWE7nLxBooDTik86phUm65oVtTqoO0c4XbQzzTfRrF7Oy3kmqpKsrzv +UDB4G4TAfGyybdystyEqPPVX3KESV9PDcxpO01R2/BWi49E4YmdL4PitIA/v7iAk +SH0UISQNjDpncRz9mGrt8LrA+O2Canqiq3xXeHJEhU5/KPCPcsrm +-----END CERTIFICATE----- diff --git 
a/modules/dlm/src/javaRestTest/resources/ssl/node.key b/modules/dlm/src/javaRestTest/resources/ssl/node.key new file mode 100644 index 0000000000000..3ec434b717a99 --- /dev/null +++ b/modules/dlm/src/javaRestTest/resources/ssl/node.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,4A5CF28950363F663AA997154AC331F9 + +oHO/8oRnLXHTCljeqW90XBa/pDmLHUwRo82hy6732bSUTrXuBwUopuzcj6r8QzbQ +1ZyCbja0wwWaQ5TuNX3ehseiaBUKWgCLYYjd2IfFsfyFmvVAcPqnltyKMRvpLkFj +NeGyKFXmfxT3rmvzrmId4KkRYCHvH+j3RKfJ0wuhduzv9sH3xfmEe521l2F8Vukq +zVNMRPT9FHlSYhM1h26WpBlzx6Wq7EfP7KdyUtmIZ5/RFJjELG5rUyLgZHDqfKCy +LdNPpOuBdpYuBC+Oy97p2YuaFSLPkkKhiI4MG4MYsOnCmEFBNup9OhF3U/t/ffXh +knTjXh2fX7h8RJ9pH/8czG+O6cZoe5O/1/Ympo+ghS7QYDUtDrNS5M4MI+eP+WiA +X3cev3VkugDw4dDSPq3i3E0oCRZesMpst2W6AtVcpa5EWRM75PVuUws0XY/V/ca0 +CdUO6CPVIAAT3urmJWC1reiNhkEMDrskOL1PnsrseGvOmCLava9xYjiAS6JGawm/ +kWN3unJ6BwlU0NkIEbj8OGHdiKAjNWr0HLR34Xa2gqup5pGVD8EoC20ZPjeDXZ2j +oEfuLo2ZaF5CWDt0CEcdN7v/JtXC9QJjf0BAMHKiULhPzv9rNfqj6xZKkNxgVrW/ +D2/Jpyn5qt6BDiyzG0jaO7AzIk3BTBksdf+5myc5/0NA+kdC9aKZKmeLAazCAK1G +CwtfTs1xF4tMj1P+GRD4DOwypml1OK528BSl+Ydubt2uc37hRsA2EctEEjy+vy2r +pR0akSVs2a4d00p26lWt5RP/h85KJdWwNj/YwRmRxWWMpNd/C4NrGgB5Ehs8CHFk +uQZOaAKXWuy/bPGKG+JdXqEM5CiasNqoJn0UBle2dOpG08Ezb19vHFgNSOuvrxEv +oxkklXzyw+JMyskmD67MxQBsHcxW4g+501OMhIb2J36LNsOMQxzjIpS2jia/P1lh +9R4WohPxKf7Hi0Ui6oQRC7i2USmisuHIlVAmv14AjiISJdhXVOFtu+hVWrCHqHEg +GWRj560G1WwT5EHZr4KN+6IRX6mCKJAO1XjSz5rPfDpet5OQGIr7N+lJwWE03kJs +6Pd8K0OYW+2rbwqFd4YugF18HQlA1T5aok4fj2P+BTOuCNfvf0ZZXFeBn45zgtZI +G/puduRwRRyUzB+XTzhN8o6dfuBjasq6U0/ZFDRKKJnAOAq/fmVxr51+zKvZ0T5B +NSPbD9wUdnABqGCR+y9AL63QP0iVrkLlKzjgUYdlb1lw4TnmLGadmfYaZoOtWH2c +FOucH3VVfinY7Q9EE5/EF5EHeG3pe3I3UHXTbAvcxvuhCByFZd6qe3Vz4AGcQLoT +ProWJzmjeElfziX4e4Ol6tNSAxwL+vhjn4KmvF4mFx6n+QMAyp8lEmPsYgnsT/n9 +pkdnk0VdLGQmp8eKExvvDfiDTagDnh6wr7Nys1VLBADIthsRW4Gdft02q3tFOyae +WpeZent5x28yRPbNgDtoStjqc0yQPdXVFuAsLzA6NT8ujlOhJCnmiPYOurGis0Ch +hQLV+kr5EybbUHGjMB01elqTXy2VTMEqQ/7TQdsy6vIDYeBq5t491t9P/TeeS5Om +-----END RSA PRIVATE KEY----- diff --git a/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecycleService.java b/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecycleService.java index 1a9f37e287ec1..6ecf20b6b47af 100644 --- a/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecycleService.java +++ b/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecycleService.java @@ -605,8 +605,8 @@ private void maybeScheduleJob() { scheduler.get().add(scheduledJob); } - // package visibility for testing - DataLifecycleErrorStore getErrorStore() { + // public visibility for testing + public DataLifecycleErrorStore getErrorStore() { return errorStore; } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml new file mode 100644 index 0000000000000..56f80d7bbe1db --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/15_info_ingest.yml @@ -0,0 +1,76 @@ +--- +setup: + - skip: + version: " - 8.8.99" + reason: "/_info/ingest only available from v8.9" + +--- +teardown: + - do: + ingest.delete_pipeline: + id: "ingest_info_pipeline" + ignore: 404 + + - do: + indices.delete: + index: "ingest_info_index" + ignore_unavailable: true + +--- +"Cluster ingest information": + - do: + ingest.put_pipeline: + id: "ingest_info_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "set" : { + "field": 
"pipeline", + "value": "pipeline" + } + } + ] + } + + - do: + bulk: + refresh: true + index: ingest_info_index + body: + - '{"create": {"pipeline" : "ingest_info_pipeline"}}' + - '{"some-field": "some-value"}' + - '{"create": {"pipeline" : "ingest_info_pipeline"}}' + - '{"some-field": "another-value"}' + + - do: + cluster.info: + target: [ ingest ] + + - is_true: cluster_name + + # Summary ingest section + - is_true: ingest.total + - gte: { ingest.total.count: 2 } + - gte: { ingest.total.time_in_millis: 0 } + # next 2 conditions _should_ be 0, but because these yaml tests are sharing the same test cluster, other tests could + # pollute the information. + - gte: { ingest.total.current: 0 } + - gte: { ingest.total.failed: 0 } + + # Pipelines section + - is_true: ingest.pipelines.ingest_info_pipeline + - gte: { ingest.pipelines.ingest_info_pipeline.count: 2 } + - gte: { ingest.pipelines.ingest_info_pipeline.time_in_millis: 0 } + - match: { ingest.pipelines.ingest_info_pipeline.current: 0 } + - match: { ingest.pipelines.ingest_info_pipeline.failed: 0 } + + # Processors section + - is_true: ingest.pipelines.ingest_info_pipeline.processors.0.set + - match: { ingest.pipelines.ingest_info_pipeline.processors.0.set.type: "set" } + - is_true: ingest.pipelines.ingest_info_pipeline.processors.0.set.stats + - gte: { ingest.pipelines.ingest_info_pipeline.processors.0.set.stats.count: 2 } + - gte: { ingest.pipelines.ingest_info_pipeline.processors.0.set.stats.time_in_millis: 0 } + - match: { ingest.pipelines.ingest_info_pipeline.processors.0.set.stats.current: 0 } + - match: { ingest.pipelines.ingest_info_pipeline.processors.0.set.stats.failed: 0 } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index bb549de980b8e..f277450aa1833 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -30,8 +30,10 @@ import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * A {@link FieldMapper} that exposes Lucene's {@link FeatureField}. @@ -152,12 +154,13 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - return new SourceValueFetcher(name(), context) { + return sourceValueFetcher(context.isSourceEnabled() ? 
context.sourcePath(name()) : Collections.emptySet()); + } + + private SourceValueFetcher sourceValueFetcher(Set sourcePaths) { + return new SourceValueFetcher(sourcePaths, nullValue) { @Override - protected Float parseSourceValue(Object value) { - if (value.equals("")) { - return nullValue; - } + protected Object parseSourceValue(Object value) { return objectToFloat(value); } }; diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java index 86214844d8919..f351cca7f73a2 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SourceConfirmedTextQuery.java @@ -10,7 +10,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; @@ -189,8 +188,8 @@ public void visit(QueryVisitor visitor) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query inRewritten = in.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query inRewritten = in.rewrite(searcher); if (inRewritten != in) { return new SourceConfirmedTextQuery(inRewritten, valueFetcherProvider, indexAnalyzer); } else if (in instanceof ConstantScoreQuery) { @@ -203,7 +202,7 @@ public Query rewrite(IndexReader reader) throws IOException { } else if (in instanceof MatchNoDocsQuery) { return in; // e.g. empty phrase query } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldTypeTests.java index f859d6da78ed8..c9bb726f8e11d 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldTypeTests.java @@ -30,6 +30,6 @@ public void testFetchSourceValue() throws IOException { assertEquals(List.of(3.14f), fetchSourceValue(mapper, 3.14)); assertEquals(List.of(42.9f), fetchSourceValue(mapper, "42.9")); - assertEquals(List.of(2.0f), fetchSourceValue(mapper, "")); + assertEquals(List.of(2.0f), fetchSourceValue(mapper, null)); } } diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/20_null_value.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/20_null_value.yml index 3175f3e50a63f..eab28f07d6810 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/20_null_value.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/rank_feature/20_null_value.yml @@ -1,12 +1,12 @@ --- -"Non positive null_vallue": +"Non positive null_value": - skip: version: " - 8.8.99" reason: "null_value parameter was added in 8.9.0" - do: - catch: bad_request + catch: /\[null_value\] must be a positive normal float for field of type \[rank_feature\]/ indices.create: index: test2 body: @@ -16,7 +16,7 @@ properties: pagerank: type: rank_feature - null_vallue: -3 + null_value: -3 --- "Search 
rank_feature with and without null_value": @@ -35,7 +35,7 @@ properties: pagerank: type: rank_feature - null_value: 15 + null_value: 100 url_length: type: rank_feature @@ -55,9 +55,19 @@ pagerank: null url_length: null + # can't index a field value equal to an empty string + - do: + catch: /failed to parse field \[pagerank\] of type \[rank_feature\] in document/ + index: + index: test1 + id: "wrong_document1" + body: + pagerank: "" + - do: indices.refresh: {} + # docs with null values are absent in search results - do: search: index: test1 @@ -72,6 +82,7 @@ - match: hits.hits.0._id: "1" + # docs with null values are present in search results - do: search: index: test1 @@ -79,6 +90,9 @@ query: rank_feature: field: pagerank + fields: + - field: 'pagerank' + - field: 'url_length' - match: hits.total.value: 2 @@ -88,3 +102,8 @@ - match: hits.hits.1._id: "1" + + - match: { hits.hits.0._source.pagerank: null } + - match: { hits.hits.0.fields.pagerank.0: 100 } + - match: { hits.hits.0._source.url_length: null } + - is_false: hits.hits.0.fields.url_length diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 1095ffaba8bc9..9738bbd43c31a 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -398,11 +398,12 @@ public static final class LateParsingQuery extends Query { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } + IndexReader reader = searcher.getIndexReader(); if (reader instanceof DirectoryReader) { IndexSearcher indexSearcher = new IndexSearcher(reader); indexSearcher.setQueryCache(null); @@ -428,7 +429,7 @@ public Query rewrite(IndexReader reader) throws IOException { return new MatchNoDocsQuery("Can't load against an empty reader"); } throw new IllegalStateException( - "can't load global ordinals for reader of type: " + reader.getClass() + " must be a DirectoryReader" + "can't load global ordinals for reader of type: " + searcher.getClass() + " must be a DirectoryReader" ); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java index e85c7b88f4a80..11c726481d0b3 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java @@ -8,7 +8,6 @@ package org.elasticsearch.percolator; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -65,8 +64,8 @@ final class PercolateQuery extends Query implements Accountable { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = candidateMatchesQuery.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = candidateMatchesQuery.rewrite(searcher); if (rewritten != candidateMatchesQuery) { return new PercolateQuery( name, diff --git 
a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 52422a35a4079..bf20674e64aa5 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -1225,7 +1225,7 @@ private CustomQuery(Term term) { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { return new TermQuery(term); } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 62ded398c11f4..5f1d22fa9ecc7 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -765,11 +765,6 @@ public void close() { } } - @Override - public long skip(long n) { - throw new UnsupportedOperationException("skip is not supported"); - } - private void releaseByteBuf(ByteBuf buf) { ReferenceCountUtil.safeRelease(buf); this.byteBuf = null; diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index 36c05191089bd..025873878975a 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -246,8 +246,10 @@ public void close() throws IOException { } @Override - public long skip(long n) { - throw new UnsupportedOperationException("GoogleCloudStorageRetryingInputStream does not support seeking"); + public long skip(long n) throws IOException { + // This could be optimized on a failure by re-opening stream directly to the preferred location. However, it is rarely called, + // so for now we will rely on the default implementation which just discards bytes by reading. 
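+        // (InputStream#skip's default implementation loops over this stream's own read(byte[], int, int) to discard
+        // bytes, so a skip still goes through the retrying read path above rather than seeking.)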
+ return super.skip(n); } @Override diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index 44af39f16e957..3eba5a7b91131 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -132,7 +132,7 @@ public void testProjectIdDefaultsToCredentials() throws Exception { public void testLoadsProxySettings() throws Exception { final String clientName = randomAlphaOfLength(5); final ServiceAccountCredentials credential = randomCredential(clientName).v1(); - var proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(InetAddress.getLoopbackAddress(), randomIntBetween(1024, 65536))); + var proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(InetAddress.getLoopbackAddress(), randomIntBetween(49152, 65535))); final GoogleCloudStorageClientSettings googleCloudStorageClientSettings = new GoogleCloudStorageClientSettings( credential, ENDPOINT_SETTING.getDefault(Settings.EMPTY), diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index 93e218c70048f..7885e36c3c295 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -225,8 +225,10 @@ private void maybeAbort(S3ObjectInputStream stream) { } @Override - public long skip(long n) { - throw new UnsupportedOperationException("S3RetryingInputStream does not support seeking"); + public long skip(long n) throws IOException { + // This could be optimized on a failure by re-opening stream directly to the preferred location. However, it is rarely called, + // so for now we will rely on the default implementation which just discards bytes by reading. + return super.skip(n); } @Override diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java index e2644da2182d2..70aaf9864d56d 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStream.java @@ -112,8 +112,10 @@ public int read(byte[] b, int off, int len) throws IOException { } @Override - public long skip(long n) { - throw new UnsupportedOperationException("RetryingHttpInputStream does not support seeking"); + public long skip(long n) throws IOException { + // This could be optimized on a failure by re-opening stream directly to the preferred location. However, it is rarely called, + // so for now we will rely on the default implementation which just discards bytes by reading. 
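+        // (As with the S3 and GCS retrying streams, the inherited skip discards bytes via this stream's retrying
+        // read(...), so transient failures while skipping are still handled.)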
+ return super.skip(n); } @Override diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index d80bf0ea47e77..2dbfe12d1337b 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -38,6 +38,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + requiresFeature 'es.dlm_feature_flag_enabled', Version.fromString("8.8.0") } tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/action/support/tasks/RestListTasksCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/action/support/tasks/RestListTasksCancellationIT.java new file mode 100644 index 0000000000000..29e59af7b9f70 --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/action/support/tasks/RestListTasksCancellationIT.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.support.tasks; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Cancellable; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.http.HttpSmokeTestCase; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.concurrent.CancellationException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; +import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefix; + +public class RestListTasksCancellationIT extends HttpSmokeTestCase { + + public void testListTasksCancellation() throws Exception { + final Request clusterStateRequest = new Request(HttpGet.METHOD_NAME, "/_cluster/state"); + clusterStateRequest.addParameter("wait_for_metadata_version", Long.toString(Long.MAX_VALUE)); + clusterStateRequest.addParameter("wait_for_timeout", "1h"); + + final PlainActionFuture clusterStateFuture = new PlainActionFuture<>(); + final Cancellable clusterStateCancellable = getRestClient().performRequestAsync( + clusterStateRequest, + wrapAsRestResponseListener(clusterStateFuture) + ); + + awaitTaskWithPrefix(ClusterStateAction.NAME); + + final Request tasksRequest = new Request(HttpGet.METHOD_NAME, "/_tasks"); + tasksRequest.addParameter("actions", ClusterStateAction.NAME); + tasksRequest.addParameter("wait_for_completion", Boolean.toString(true)); + tasksRequest.addParameter("timeout", "1h"); + + final PlainActionFuture tasksFuture = new PlainActionFuture<>(); + final Cancellable tasksCancellable = getRestClient().performRequestAsync(tasksRequest, wrapAsRestResponseListener(tasksFuture)); + + awaitTaskWithPrefix(ListTasksAction.NAME + "[n]"); + + 
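+        // Cancelling the HTTP request should propagate through the task manager and cancel the per-node "[n]"
+        // child tasks observed above; the assertBusy below verifies that no cancellable list-tasks tasks remain.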
tasksCancellable.cancel(); + + final var taskManagers = new ArrayList(internalCluster().getNodeNames().length); + for (final var transportService : internalCluster().getInstances(TransportService.class)) { + taskManagers.add(transportService.getTaskManager()); + } + assertBusy( + () -> assertFalse( + taskManagers.stream() + .flatMap(taskManager -> taskManager.getCancellableTasks().values().stream()) + .anyMatch(t -> t.getAction().startsWith(ListTasksAction.NAME)) + ) + ); + + expectThrows(CancellationException.class, () -> tasksFuture.actionGet(10, TimeUnit.SECONDS)); + clusterStateCancellable.cancel(); + } + +} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java new file mode 100644 index 0000000000000..d41eeab7aef26 --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.http; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.netty4.Netty4HttpServerTransport; +import org.elasticsearch.http.netty4.internal.HttpValidator; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.transport.netty4.AcceptChannelHandler; +import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.transport.netty4.TLSConfig; +import org.elasticsearch.xcontent.NamedXContentRegistry; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CyclicBarrier; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; +import static org.elasticsearch.test.TaskAssertions.assertAllCancellableTasksAreCancelled; +import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; +import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefix; + +public class ClusterInfoRestCancellationIT extends HttpSmokeTestCase { + + public void testClusterInfoRequestCancellation() throws Exception { + // we create a barrier with one extra party, so we can lock in each node within 
this method. + final var cyclicBarrier = new CyclicBarrier(internalCluster().size() + 1); + var future = new PlainActionFuture(); + internalCluster().getInstances(HttpServerTransport.class) + .forEach(transport -> ((FakeHttpTransport) transport).cyclicBarrier = cyclicBarrier); + + logger.info("--> Sending request"); + var cancellable = getRestClient().performRequestAsync( + new Request(HttpGet.METHOD_NAME, "/_info/_all"), + wrapAsRestResponseListener(future) + ); + + assertFalse(future.isDone()); + awaitTaskWithPrefix(NodesStatsAction.NAME); + + logger.info("--> Checking that all the HttpTransport are waiting..."); + safeAwait(cyclicBarrier); + + logger.info("--> Cancelling request"); + cancellable.cancel(); + + assertTrue(future.isDone()); + expectThrows(CancellationException.class, future::actionGet); + assertAllCancellableTasksAreCancelled(NodesStatsAction.NAME); + + logger.info("--> Releasing all the node requests :)"); + safeAwait(cyclicBarrier); + + assertAllTasksHaveFinished(NodesStatsAction.NAME); + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), FakeNetworkPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(NetworkModule.HTTP_TYPE_KEY, FakeHttpTransport.NAME) + .build(); + } + + public static class FakeNetworkPlugin extends Plugin implements NetworkPlugin { + + public FakeNetworkPlugin() {} + + @Override + public Map> getHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + BiConsumer perRequestThreadContext, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Map.of( + FakeHttpTransport.NAME, + () -> new FakeHttpTransport( + settings, + networkService, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + tracer, + TLSConfig.noTLS(), + null, + null + ) + ); + } + } + + public static class FakeHttpTransport extends Netty4HttpServerTransport { + + public static final String NAME = "fake-transport"; + private CyclicBarrier cyclicBarrier; + + public FakeHttpTransport( + Settings settings, + NetworkService networkService, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer, + TLSConfig tlsConfig, + AcceptChannelHandler.AcceptPredicate acceptChannelPredicate, + HttpValidator httpValidator + ) { + super( + settings, + networkService, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + sharedGroupFactory, + tracer, + tlsConfig, + acceptChannelPredicate, + httpValidator + ); + } + + @Override + public HttpStats stats() { + safeAwait(cyclicBarrier); + safeAwait(cyclicBarrier); + return super.stats(); + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.create_cross_cluster_api_key.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.create_cross_cluster_api_key.json new file mode 100644 index 0000000000000..98069a46e907a --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.create_cross_cluster_api_key.json @@ -0,0 +1,34 @@ +{ + 
"security.create_cross_cluster_api_key": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html", + "description": "Creates a cross-cluster API key for API key based remote cluster access." + }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.untrusted_remote_cluster_feature_flag_registered", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_security/cross_cluster/api_key", + "methods": [ + "POST" + ] + } + ] + }, + "params": {}, + "body": { + "description": "The request to create a cross-cluster API key", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.update_cross_cluster_api_key.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.update_cross_cluster_api_key.json new file mode 100644 index 0000000000000..c0466bac79b91 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.update_cross_cluster_api_key.json @@ -0,0 +1,39 @@ +{ + "security.update_cross_cluster_api_key": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-update-cross-cluster-api-key.html", + "description": "Updates attributes of an existing cross-cluster API key." + }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.untrusted_remote_cluster_feature_flag_registered", + "headers": { + "accept": [ + "application/json" + ], + "content_type": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_security/cross_cluster/api_key/{id}", + "methods": [ + "PUT" + ], + "parts": { + "id": { + "type": "string", + "description": "The ID of the cross-cluster API key to update" + } + } + } + ] + }, + "body": { + "description": "The request to update attributes of a cross-cluster API key.", + "required": true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get.json new file mode 100644 index 0000000000000..b7a28d71e95af --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/synonyms.get.json @@ -0,0 +1,42 @@ +{ + "synonyms.get": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-synonyms.html", + "description": "Retrieves a synonym set" + }, + "stability": "experimental", + "visibility": "feature_flag", + "feature_flag": "es.synonyms_feature_flag_enabled", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_synonyms/{synonyms_set}", + "methods": [ + "GET" + ], + "parts": { + "synonyms_set": { + "type": "string", + "description": "The name of the synonyms set to be retrieved" + } + } + } + ] + }, + "params": { + "from": { + "type": "int", + "description": "Starting offset (default: 0)" + }, + "size": { + "type": "int", + "description": "specifies a max number of results to get" + } + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml new file mode 100644 index 0000000000000..982dc56e67cfc --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/10_info_all.yml @@ -0,0 +1,38 @@ +--- +setup: + - skip: + version: " - 
8.8.99" + reason: "/_info/_all only available from v8.9" + +--- +"Cluster Info _all": + - do: + cluster.info: + target: [ _all ] + + # this tests only checks that the target exists, to check the structure of them, we have specific tests + - is_true: cluster_name + - is_true: http + - is_true: ingest + +--- +"Cluster Info fails when mixing _all with other targets": + - do: + catch: bad_request + cluster.info: + target: [ _all, ingest ] + + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "request [/_info/_all,ingest] contains _all and individual target [_all,ingest]" } + +--- +"Cluster Info fails with an invalid target": + - do: + catch: bad_request + cluster.info: + target: [ ingest, invalid_target ] + + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "request [/_info/ingest,invalid_target] contains unrecognized target: [invalid_target]" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/info.http/10_info_http.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml similarity index 97% rename from rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/info.http/10_info_http.yml rename to rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml index b6c2dcdff8aa2..f238e5116e146 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/info.http/10_info_http.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.info/20_info_http.yml @@ -1,5 +1,5 @@ --- -"HTTP Stats": +"Cluster HTTP Info": - skip: version: " - 8.8.99" reason: "/_info/http only available from v8.9" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml new file mode 100644 index 0000000000000..8769dc3742d48 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml @@ -0,0 +1,129 @@ +setup: + - do: + synonyms.put: + synonyms_set: test-get-synonyms + body: + synonyms_set: + - synonyms: "hello, hi" + id: "test-id-1" + - synonyms: "bye => goodbye" + id: "test-id-2" + - synonyms: "test => check" + id: "test-id-3" + +--- +"Get synonyms set": + - skip: + version: " - 8.8.99" + reason: Introduced in 8.9.0 + - do: + synonyms.get: + synonyms_set: test-get-synonyms + + - match: + count: 3 + - match: + synonyms_set: + - synonyms: "hello, hi" + id: "test-id-1" + - synonyms: "bye => goodbye" + id: "test-id-2" + - synonyms: "test => check" + id: "test-id-3" + +--- +"Get synonyms set - not found": + - skip: + version: " - 8.8.99" + reason: Introduced in 8.9.0 + - do: + catch: missing + synonyms.get: + synonyms_set: unknown-synonym-set + +--- +"Pagination - size": + - skip: + version: " - 8.8.99" + reason: Introduced in 8.9.0 + - do: + synonyms.get: + synonyms_set: test-get-synonyms + size: 2 + + - match: + count: 3 + - match: + synonyms_set: + - synonyms: "hello, hi" + id: "test-id-1" + - synonyms: "bye => goodbye" + id: "test-id-2" + +--- +"Pagination - from": + - skip: + version: " - 8.8.99" + reason: Introduced in 8.9.0 + - do: + synonyms.get: + synonyms_set: test-get-synonyms + from: 1 + + - match: + count: 3 + - match: + synonyms_set: + - synonyms: "bye => goodbye" + id: "test-id-2" + - synonyms: "test => check" + id: "test-id-3" + + +--- +"Synonyms set with same IDs": + - skip: + version: " - 
8.8.99" + reason: Introduced in 8.9.0 + + - do: + synonyms.put: + synonyms_set: test-get-synonyms-same-ids + body: + synonyms_set: + - synonyms: "another, different" + id: "test-id-1" + - synonyms: "same => equal" + id: "test-id-2" + - synonyms: "work, hard" + id: "test-id-3" + + - do: + synonyms.get: + synonyms_set: test-get-synonyms + + - match: + count: 3 + - match: + synonyms_set: + - synonyms: "hello, hi" + id: "test-id-1" + - synonyms: "bye => goodbye" + id: "test-id-2" + - synonyms: "test => check" + id: "test-id-3" + + - do: + synonyms.get: + synonyms_set: test-get-synonyms-same-ids + + - match: + count: 3 + - match: + synonyms_set: + - synonyms: "another, different" + id: "test-id-1" + - synonyms: "same => equal" + id: "test-id-2" + - synonyms: "work, hard" + id: "test-id-3" diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java index 178175b8b5554..6678cc5ac2701 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/RejectionActionIT.java @@ -37,7 +37,8 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .put("thread_pool.search.size", 1) .put("thread_pool.search.queue_size", 1) .put("thread_pool.write.size", 1) - .put("thread_pool.write.queue_size", 1) + // Needs to be 2 since we have concurrent indexing and global checkpoint syncs + .put("thread_pool.write.queue_size", 2) .put("thread_pool.get.size", 1) .put("thread_pool.get.queue_size", 1) .build(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 30ce9364aa9c3..8945ef7596be8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -132,20 +132,24 @@ public void testTaskCounts() { assertThat(response.getTasks().size(), greaterThanOrEqualTo(cluster().numDataNodes())); } - public void testMasterNodeOperationTasks() { + public void testMasterNodeOperationTasks() throws Exception { registerTaskManagerListeners(ClusterHealthAction.NAME); // First run the health on the master node - should produce only one task on the master node internalCluster().masterClient().admin().cluster().prepareHealth().get(); assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events - assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events + // counting only unregistration events + // When checking unregistration events there might be some delay since receiving the response from the cluster doesn't + // guarantee that the task has been unregistered. 
+        assertBusy(() -> assertEquals(1, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)));
 
         resetTaskManagerListeners(ClusterHealthAction.NAME);
 
         // Now run the health on a non-master node - should produce one task on master and one task on another node
         internalCluster().nonMasterClient().admin().cluster().prepareHealth().get();
         assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, Tuple::v1)); // counting only registration events
-        assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)); // counting only unregistration events
+        // counting only unregistration events
+        assertBusy(() -> assertEquals(2, numberOfEvents(ClusterHealthAction.NAME, event -> event.v1() == false)));
 
         List<TaskInfo> tasks = findEvents(ClusterHealthAction.NAME, Tuple::v1);
 
         // Verify that one of these tasks is a parent of another task
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
index 240996aedd88c..20a9bc9781a37 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -160,7 +160,10 @@ public void testDurableFlagHasEffect() {
             Translog.Location lastWriteLocation = tlog.getLastWriteLocation();
             try {
                 // the lastWriteLocation has an Integer.MAX_VALUE size so we have to create a new one
-                return tlog.ensureSynced(new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0));
+                return tlog.ensureSynced(
+                    new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0),
+                    SequenceNumbers.UNASSIGNED_SEQ_NO
+                );
             } catch (IOException e) {
                 throw new UncheckedIOException(e);
             }
@@ -647,7 +650,7 @@ public static final IndexShard newIndexShard(
             null,
             Collections.emptyList(),
             Arrays.asList(listeners),
-            () -> {},
+            IndexShardTestCase.NOOP_GCP_SYNCER,
             RetentionLeaseSyncer.EMPTY,
             cbs,
             IndexModule.DEFAULT_SNAPSHOT_COMMIT_SUPPLIER,
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java
index 4ff3cc298a10d..4bebbf5cdbd95 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java
@@ -97,6 +97,6 @@ public void testAbortedRestoreAlsoAbortFileRestores() throws Exception {
     }
 
     private static void waitForMaxActiveSnapshotThreads(final String node, final Matcher matcher) throws Exception {
-        assertBusy(() -> assertThat(snapshotThreadPoolStats(node).getActive(), matcher), 30L, TimeUnit.SECONDS);
+        assertBusy(() -> assertThat(snapshotThreadPoolStats(node).active(), matcher), 30L, TimeUnit.SECONDS);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java
index 4739325cf48ff..7a57eada23375 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersion.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersion.java
@@ -125,12 +125,14 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId
     public static final TransportVersion V_8_500_002 = registerTransportVersion(8_500_002, "055dd314-ff40-4313-b4c6-9fccddfa42a8");
     public static final TransportVersion V_8_500_003 = registerTransportVersion(8_500_003,
"30adbe0c-8614-40dd-81b5-44e9c657bb77"); public static final TransportVersion V_8_500_004 = registerTransportVersion(8_500_004, "6a00db6a-fd66-42a9-97ea-f6cc53169110"); + public static final TransportVersion V_8_500_005 = registerTransportVersion(8_500_005, "65370d2a-d936-4383-a2e0-8403f708129b"); + public static final TransportVersion V_8_500_006 = registerTransportVersion(8_500_006, "7BB5621A-80AC-425F-BA88-75543C442F23"); /** * Reference to the most recent transport version. * This should be the transport version with the highest id. */ - public static final TransportVersion CURRENT = V_8_500_004; + public static final TransportVersion CURRENT = V_8_500_006; /** * Reference to the earliest compatible transport version to this version of the codebase. diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 83af65dbd621c..83350471a4a83 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -138,9 +138,9 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_8_6_2 = new Version(8_06_02_99, org.apache.lucene.util.Version.LUCENE_9_4_2); public static final Version V_8_7_0 = new Version(8_07_00_99, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version V_8_7_1 = new Version(8_07_01_99, org.apache.lucene.util.Version.LUCENE_9_5_0); - public static final Version V_8_7_2 = new Version(8_07_02_99, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version V_8_8_0 = new Version(8_08_00_99, org.apache.lucene.util.Version.LUCENE_9_6_0); - public static final Version V_8_9_0 = new Version(8_09_00_99, org.apache.lucene.util.Version.LUCENE_9_6_0); + public static final Version V_8_8_1 = new Version(8_08_01_99, org.apache.lucene.util.Version.LUCENE_9_6_0); + public static final Version V_8_9_0 = new Version(8_09_00_99, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version CURRENT = V_8_9_0; private static final NavigableMap VERSION_IDS; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index db0821805aea0..ad67d94c728b6 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -249,7 +249,9 @@ import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.synonyms.GetSynonymsAction; import org.elasticsearch.action.synonyms.PutSynonymsAction; +import org.elasticsearch.action.synonyms.TransportGetSynonymsAction; import org.elasticsearch.action.synonyms.TransportPutSynonymsAction; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.action.termvectors.TermVectorsAction; @@ -434,6 +436,7 @@ import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.rest.action.synonyms.RestGetSynonymsAction; import org.elasticsearch.rest.action.synonyms.RestPutSynonymsAction; import org.elasticsearch.synonyms.SynonymsAPI; import org.elasticsearch.tasks.Task; @@ -779,6 +782,7 @@ public void reg // Synonyms if (SynonymsAPI.isEnabled()) { 
actions.register(PutSynonymsAction.INSTANCE, TransportPutSynonymsAction.class); + actions.register(GetSynonymsAction.INSTANCE, TransportGetSynonymsAction.class); } return unmodifiableMap(actions.getRegistry()); @@ -992,6 +996,7 @@ public void initRestHandlers(Supplier nodesInCluster) { // Synonyms if (SynonymsAPI.isEnabled()) { registerHandler.accept(new RestPutSynonymsAction()); + registerHandler.accept(new RestGetSynonymsAction()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index 3c3c097b605d5..dec9fc0111f29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -311,7 +311,12 @@ public Iterator toXContentChunked(ToXContent.Params outerP ifPresent(getOs()).toXContent(builder, params); ifPresent(getProcess()).toXContent(builder, params); ifPresent(getJvm()).toXContent(builder, params); - ifPresent(getThreadPool()).toXContent(builder, params); + return builder; + }), + + ifPresent(getThreadPool()).toXContentChunked(outerParams), + + Iterators.single((builder, params) -> { ifPresent(getFs()).toXContent(builder, params); return builder; }), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 5b0194c81283e..597c9821e48ec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -14,9 +14,12 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.regex.Regex.simpleMatch; @@ -119,4 +122,8 @@ public ListTasksRequest setDescriptions(String... 
descriptions) { return this; } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index d3a9ab80db5ca..eaaebb5d2bb9c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.RemovedTaskListener; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -76,7 +77,13 @@ protected void taskOperation(CancellableTask actionTask, ListTasksRequest reques } @Override - protected void processTasks(ListTasksRequest request, ActionListener> nodeOperation) { + protected void doExecute(Task task, ListTasksRequest request, ActionListener listener) { + assert task instanceof CancellableTask; + super.doExecute(task, request, listener); + } + + @Override + protected void processTasks(CancellableTask nodeTask, ListTasksRequest request, ActionListener> nodeOperation) { if (request.getWaitForCompletion()) { final ListenableActionFuture> future = new ListenableActionFuture<>(); final List processedTasks = new ArrayList<>(); @@ -137,8 +144,9 @@ protected void processTasks(ListTasksRequest request, ActionListener> threadPool, ThreadPool.Names.SAME ); + nodeTask.addListener(() -> future.onFailure(new TaskCancelledException("task cancelled"))); } else { - super.processTasks(request, nodeOperation); + super.processTasks(nodeTask, request, nodeOperation); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index d7c00bfb00f65..a3335a02cdaad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.seqno.RetentionLeaseStats; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -110,11 +109,6 @@ protected void shardOperation(IndicesStatsRequest request, ShardRouting shardRou assert task instanceof CancellableTask; IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); - // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet - if (indexShard.routingEntry() == null) { - throw new ShardNotFoundException(indexShard.shardId()); - } - CommonStats commonStats = CommonStats.getShardLevelStats(indicesService.getIndicesQueryCache(), indexShard, 
request.flags()); CommitStats commitStats; SeqNoStats seqNoStats; diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index ed0a4e8973972..05da8963e4519 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -9,12 +9,20 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; @@ -24,6 +32,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -34,8 +44,11 @@ */ public class TransportGetAction extends TransportSingleShardAction { + private static final Logger logger = LogManager.getLogger(TransportGetAction.class); + private final IndicesService indicesService; private final ExecutorSelector executorSelector; + private final NodeClient client; @Inject public TransportGetAction( @@ -45,7 +58,8 @@ public TransportGetAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ExecutorSelector executorSelector + ExecutorSelector executorSelector, + NodeClient client ) { super( GetAction.NAME, @@ -59,6 +73,7 @@ public TransportGetAction( ); this.indicesService = indicesService; this.executorSelector = executorSelector; + this.client = client; // register the internal TransportGetFromTranslogAction new TransportGetFromTranslogAction(transportService, indicesService, actionFilters); } @@ -78,7 +93,10 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { request.request().routing(), request.request().preference() ); - return clusterService.operationRouting().useOnlyPromotableShardsForStateless(iterator); + if (iterator == null) { + return null; + } + return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList()); } @Override @@ -91,6 +109,12 @@ protected void resolveRequest(ClusterState state, InternalRequest request) { protected void asyncShardOperation(GetRequest request, ShardId shardId, ActionListener listener) throws IOException { IndexService indexService = 
indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); + if (indexShard.routingEntry().isPromotableToPrimary() == false) { + handleGetOnUnpromotableShard(request, indexShard, listener); + return; + } + assert DiscoveryNode.isStateless(clusterService.getSettings()) == false + : "A TransportGetAction should always be handled by a search shard in Stateless"; if (request.realtime()) { // we are not tied to a refresh cycle here anyway asyncGet(request, shardId, listener); } else { @@ -148,6 +172,66 @@ private void asyncGet(GetRequest request, ShardId shardId, ActionListener listener) + throws IOException { + ShardId shardId = indexShard.shardId(); + DiscoveryNode node = getCurrentNodeOfPrimary(shardId); + if (request.refresh()) { + logger.trace("send refresh action for shard {} to node {}", shardId, node.getId()); + var refreshRequest = new BasicReplicationRequest(shardId); + refreshRequest.setParentTask(request.getParentTask()); + client.executeLocally( + TransportShardRefreshAction.TYPE, + refreshRequest, + ActionListener.wrap(replicationResponse -> super.asyncShardOperation(request, shardId, listener), listener::onFailure) + ); + } else if (request.realtime()) { + TransportGetFromTranslogAction.Request getFromTranslogRequest = new TransportGetFromTranslogAction.Request(request, shardId); + getFromTranslogRequest.setParentTask(request.getParentTask()); + transportService.sendRequest( + node, + TransportGetFromTranslogAction.NAME, + getFromTranslogRequest, + new ActionListenerResponseHandler<>(listener.delegateFailure((l, r) -> { + if (r.getResult() != null) { + logger.debug("received result for real-time get for id '{}' from promotable shard", request.id()); + l.onResponse(new GetResponse(r.getResult())); + } else { + logger.debug( + "no result for real-time get for id '{}' from promotable shard (segment generation to wait for: {})", + request.id(), + r.segmentGeneration() + ); + if (r.segmentGeneration() == -1) { + // Nothing to wait for (no previous unsafe generation), just handle the Get locally. + ActionRunnable.supply(listener, () -> shardOperation(request, shardId)).run(); + } else { + assert r.segmentGeneration() > -1L; + indexShard.waitForSegmentGeneration( + r.segmentGeneration(), + ActionListener.wrap(aLong -> super.asyncShardOperation(request, shardId, listener), listener::onFailure) + ); + } + } + }), TransportGetFromTranslogAction.Response::new, getExecutor(request, shardId)) + ); + } else { + // A non-real-time get with no explicit refresh requested. 
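+ // It can be served from the local search shard directly, without consulting the promotable shard.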
+ super.asyncShardOperation(request, shardId, listener); + } + } + + private DiscoveryNode getCurrentNodeOfPrimary(ShardId shardId) { + var clusterState = clusterService.state(); + var shardRoutingTable = clusterState.routingTable().shardRoutingTable(shardId); + if (shardRoutingTable.primaryShard() == null || shardRoutingTable.primaryShard().active() == false) { + throw new NoShardAvailableActionException(shardId, "primary shard is not active"); + } + DiscoveryNode node = clusterState.nodes().get(shardRoutingTable.primaryShard().currentNodeId()); + assert node != null; + return node; + } + private IndexShard getIndexShard(ShardId shardId) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); return indexService.getShard(shardId.id()); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 28d5616f77094..501b0edaae4ae 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -403,21 +403,23 @@ void executeRequest( } static void adjustSearchType(SearchRequest searchRequest, boolean singleShard) { - // optimize search type for cases where there is only one shard group to search on - if (singleShard) { - // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard - searchRequest.searchType(QUERY_THEN_FETCH); + // if there's a kNN search, always use DFS_QUERY_THEN_FETCH + if (searchRequest.hasKnnSearch()) { + searchRequest.searchType(DFS_QUERY_THEN_FETCH); + return; } // if there's only suggest, disable request cache and always use QUERY_THEN_FETCH if (searchRequest.isSuggestOnly()) { searchRequest.requestCache(false); searchRequest.searchType(QUERY_THEN_FETCH); + return; } - // if there's a kNN search, always use DFS_QUERY_THEN_FETCH - if (searchRequest.hasKnnSearch()) { - searchRequest.searchType(DFS_QUERY_THEN_FETCH); + // optimize search type for cases where there is only one shard group to search on + if (singleShard) { + // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard + searchRequest.searchType(QUERY_THEN_FETCH); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java b/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java new file mode 100644 index 0000000000000..7c6f022eb7e83 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.RunOnce; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; + +import java.util.Iterator; + +/** + * Allows an action to fan-out to several sub-actions and accumulate their results, but which reacts to a cancellation by releasing all + * references to itself, and hence the partially-accumulated results, allowing them to be garbage-collected. This is a useful protection for + * cases where the results may consume a lot of heap (e.g. stats) but the final response may be delayed by a single slow node for long + * enough that the client gives up. + *
<p>
+ * Note that it's easy to accidentally capture another reference to this class when implementing it, and this will prevent the early release + * of any accumulated results. Beware of lambdas and method references. You must test your implementation carefully (using e.g. + * {@code ReachabilityChecker}) to make sure it doesn't do this. + */ +public abstract class CancellableFanOut { + + private static final Logger logger = LogManager.getLogger(CancellableFanOut.class); + + /** + * Run the fan-out action. + * + * @param task The task to watch for cancellations. If {@code null} or not a {@link CancellableTask} then the fan-out still + * works, just without any cancellation handling. + * @param itemsIterator The items over which to fan out. Iterated on the calling thread. + * @param listener A listener for the final response, which is completed after all the fanned-out actions have completed. It is not + * completed promptly on cancellation. Completed on the thread that handles the final per-item response (or + * the calling thread if there are no items). + */ + public final void run(@Nullable Task task, Iterator itemsIterator, ActionListener listener) { + + final var cancellableTask = task instanceof CancellableTask ct ? ct : null; + + // Captures the final result as soon as it's known (either on completion or on cancellation) without necessarily completing the + // outer listener, because we do not want to complete the outer listener until all sub-tasks are complete + final var resultListener = new SubscribableListener(); + + // Completes resultListener (either on completion or on cancellation). Captures a reference to 'this', but within a 'RunOnce' so it + // is released promptly when executed. + final var resultListenerCompleter = new RunOnce(() -> { + if (cancellableTask != null && cancellableTask.notifyIfCancelled(resultListener)) { + return; + } + // It's important that we complete resultListener before returning, because otherwise there's a risk that a cancellation arrives + // later which might unexpectedly complete the final listener on a transport thread. + ActionListener.completeWith(resultListener, this::onCompletion); + }); + + // Collects the per-item listeners up so they can all be completed exceptionally on cancellation. Never completed successfully. + final var itemCancellationListener = new SubscribableListener(); + if (cancellableTask != null) { + cancellableTask.addListener(() -> { + assert cancellableTask.isCancelled(); + resultListenerCompleter.run(); + cancellableTask.notifyIfCancelled(itemCancellationListener); + }); + } + + try (var refs = new RefCountingRunnable(() -> { + // When all sub-tasks are complete, pass the result from resultListener to the outer listener. + resultListenerCompleter.run(); + // resultListener is always complete by this point, so the outer listener is completed on this thread + resultListener.addListener(listener); + })) { + while (itemsIterator.hasNext()) { + final var item = itemsIterator.next(); + + // Captures a reference to 'this', but within a 'notifyOnce' so it is released promptly when completed. + final ActionListener itemResponseListener = ActionListener.notifyOnce(new ActionListener<>() { + @Override + public void onResponse(ItemResponse itemResponse) { + onItemResponse(item, itemResponse); + } + + @Override + public void onFailure(Exception e) { + if (cancellableTask != null && cancellableTask.isCancelled()) { + // Completed on cancellation so it is released promptly, but there's no need to handle the exception. 
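+ // The outer listener is instead completed with the cancellation exception by the task-cancellation listener registered above.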
+ return; + } + onItemFailure(item, e); + } + + @Override + public String toString() { + return "[" + CancellableFanOut.this + "][" + item + "]"; + } + }); + + if (cancellableTask != null) { + if (cancellableTask.isCancelled()) { + return; + } + + // Register this item's listener for prompt cancellation notification. + itemCancellationListener.addListener(itemResponseListener); + } + + // Process the item, capturing a ref to make sure the outer listener is completed after this item is processed. + sendItemRequest(item, ActionListener.releaseAfter(itemResponseListener, refs.acquire())); + } + } catch (Exception e) { + // NB the listener may have been completed already (by exiting this try block) so this exception may not be sent to the caller, + // but we cannot do anything else with it; an exception here is a bug anyway. + logger.error("unexpected failure in [" + this + "]", e); + assert false : e; + throw e; + } + } + + /** + * Run the action (typically by sending a transport request) for an individual item. Called in sequence on the thread that invoked + * {@link #run}. May not be called for every item if the task is cancelled during the iteration. + *
<p>
+ * Note that it's easy to accidentally capture another reference to this class when implementing this method, and that will prevent the + * early release of any accumulated results. Beware of lambdas, and test carefully. + */ + protected abstract void sendItemRequest(Item item, ActionListener listener); + + /** + * Handle a successful response for an item. May be called concurrently for multiple items. Not called if the task is cancelled. + *
<p>
+ * Note that it's easy to accidentally capture another reference to this class when implementing this method, and that will prevent the + * early release of any accumulated results. Beware of lambdas, and test carefully. + */ + protected abstract void onItemResponse(Item item, ItemResponse itemResponse); + + /** + * Handle a failure for an item. May be called concurrently for multiple items. Not called if the task is cancelled. + *
<p>
+ * Note that it's easy to accidentally capture another reference to this class when implementing this method, and that will prevent the + * early release of any accumulated results. Beware of lambdas, and test carefully. + */ + protected abstract void onItemFailure(Item item, Exception e); + + /** + * Called when responses for all items have been processed, on the thread that processed the last per-item response. Not called if the + * task is cancelled. + *
<p>
+ * Note that it's easy to accidentally capture another reference to this class when implementing this method, and that will prevent the + * early release of any accumulated results. Beware of lambdas, and test carefully. + */ + protected abstract FinalResponse onCompletion() throws Exception; +} diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index f7c4fad29fdfa..aec75e3300481 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -18,11 +18,11 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.CancellableFanOut; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastRequest; @@ -37,9 +37,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.common.util.concurrent.RunOnce; -import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportChannel; @@ -280,100 +277,18 @@ private void executeAsCoordinatingNode( ResponseFactory responseFactory, ActionListener listener ) { - final var mutex = new Object(); - final var shardResponses = new ArrayList(availableShardCount); - final var exceptions = new ArrayList(0); - final var totalShards = new AtomicInteger(unavailableShardCount); - final var successfulShards = new AtomicInteger(0); - - final var resultListener = new ListenableFuture(); - final var resultListenerCompleter = new RunOnce(() -> { - if (task instanceof CancellableTask cancellableTask) { - if (cancellableTask.notifyIfCancelled(resultListener)) { - return; - } - } - // ref releases all happen-before here so no need to be synchronized - resultListener.onResponse( - responseFactory.newResponse(totalShards.get(), successfulShards.get(), exceptions.size(), shardResponses, exceptions) - ); - }); - - final var nodeFailureListeners = new ListenableFuture(); - if (task instanceof CancellableTask cancellableTask) { - cancellableTask.addListener(() -> { - assert cancellableTask.isCancelled(); - resultListenerCompleter.run(); - cancellableTask.notifyIfCancelled(nodeFailureListeners); - }); - } - - final var transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); - - try (var refs = new RefCountingRunnable(() -> { - resultListener.addListener(listener); - resultListenerCompleter.run(); - })) { - for (final var entry : shardsByNodeId.entrySet()) { + new CancellableFanOut>, NodeResponse, Response>() { + final ArrayList 
shardResponses = new ArrayList<>(availableShardCount); + final ArrayList exceptions = new ArrayList<>(0); + final AtomicInteger totalShards = new AtomicInteger(unavailableShardCount); + final AtomicInteger successfulShards = new AtomicInteger(0); + final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); + + @Override + protected void sendItemRequest(Map.Entry> entry, ActionListener listener) { final var node = nodes.get(entry.getKey()); final var shards = entry.getValue(); - final ActionListener nodeResponseListener = ActionListener.notifyOnce(new ActionListener() { - @Override - public void onResponse(NodeResponse nodeResponse) { - synchronized (mutex) { - shardResponses.addAll(nodeResponse.getResults()); - } - totalShards.addAndGet(nodeResponse.getTotalShards()); - successfulShards.addAndGet(nodeResponse.getSuccessfulShards()); - - for (BroadcastShardOperationFailedException exception : nodeResponse.getExceptions()) { - if (TransportActions.isShardNotAvailableException(exception)) { - assert node.getVersion().before(Version.V_8_7_0) : node; // we stopped sending these ignored exceptions - } else { - synchronized (mutex) { - exceptions.add( - new DefaultShardOperationFailedException( - exception.getShardId().getIndexName(), - exception.getShardId().getId(), - exception - ) - ); - } - } - } - } - - @Override - public void onFailure(Exception e) { - if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { - return; - } - - logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, node), e); - - final var failedNodeException = new FailedNodeException(node.getId(), "Failed node [" + node.getId() + "]", e); - synchronized (mutex) { - for (ShardRouting shard : shards) { - exceptions.add( - new DefaultShardOperationFailedException(shard.getIndexName(), shard.getId(), failedNodeException) - ); - } - } - - totalShards.addAndGet(shards.size()); - } - - @Override - public String toString() { - return "[" + actionName + "][" + node.descriptionWithoutAttributes() + "]"; - } - }); - - if (task instanceof CancellableTask) { - nodeFailureListeners.addListener(nodeResponseListener); - } - final var nodeRequest = new NodeRequest(request, shards, node.getId()); if (task != null) { nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); @@ -384,15 +299,74 @@ public String toString() { transportNodeBroadcastAction, nodeRequest, transportRequestOptions, - new ActionListenerResponseHandler<>( - ActionListener.releaseAfter(nodeResponseListener, refs.acquire()), - NodeResponse::new - ) + new ActionListenerResponseHandler<>(listener, nodeResponseReader) ); } - } + + @Override + protected void onItemResponse(Map.Entry> entry, NodeResponse nodeResponse) { + final var node = nodes.get(entry.getKey()); + synchronized (this) { + shardResponses.addAll(nodeResponse.getResults()); + } + totalShards.addAndGet(nodeResponse.getTotalShards()); + successfulShards.addAndGet(nodeResponse.getSuccessfulShards()); + + for (BroadcastShardOperationFailedException exception : nodeResponse.getExceptions()) { + if (TransportActions.isShardNotAvailableException(exception)) { + assert node.getVersion().before(Version.V_8_7_0) : node; // we stopped sending these ignored exceptions + } else { + synchronized (this) { + exceptions.add( + new DefaultShardOperationFailedException( + exception.getShardId().getIndexName(), + exception.getShardId().getId(), + exception + ) + ); + } + } + } + } + + @Override + protected void 
onItemFailure(Map.Entry> entry, Exception e) { + final var node = nodes.get(entry.getKey()); + final var shards = entry.getValue(); + logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, node), e); + + final var failedNodeException = new FailedNodeException(node.getId(), "Failed node [" + node.getId() + "]", e); + synchronized (this) { + for (ShardRouting shard : shards) { + exceptions.add(new DefaultShardOperationFailedException(shard.getIndexName(), shard.getId(), failedNodeException)); + } + } + + totalShards.addAndGet(shards.size()); + } + + @Override + protected Response onCompletion() { + // ref releases all happen-before here so no need to be synchronized + return responseFactory.newResponse( + totalShards.get(), + successfulShards.get(), + exceptions.size(), + shardResponses, + exceptions + ); + } + + @Override + public String toString() { + return actionName; + } + }.run(task, shardsByNodeId.entrySet().iterator(), listener); } + // not an inline method reference to avoid capturing CancellableFanOut.this. + private final Writeable.Reader nodeResponseReader = NodeResponse::new; + class BroadcastByNodeTransportRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeRequest request, TransportChannel channel, Task task) throws Exception { @@ -415,87 +389,51 @@ private void executeAsDataNode( ) { logger.trace("[{}] executing operation on [{}] shards", actionName, shards.size()); - final var results = new ArrayList(shards.size()); - final var exceptions = new ArrayList(0); + new CancellableFanOut() { - final var resultListener = new ListenableFuture(); - final var resultListenerCompleter = new RunOnce(() -> { - if (task instanceof CancellableTask cancellableTask) { - if (cancellableTask.notifyIfCancelled(resultListener)) { - return; - } + final ArrayList results = new ArrayList<>(shards.size()); + final ArrayList exceptions = new ArrayList<>(0); + + @Override + protected void sendItemRequest(ShardRouting shardRouting, ActionListener listener) { + logger.trace(() -> format("[%s] executing operation for shard [%s]", actionName, shardRouting.shortSummary())); + ActionRunnable.wrap(listener, l -> shardOperation(request, shardRouting, task, l)).run(); } - // ref releases all happen-before here so no need to be synchronized - resultListener.onResponse(new NodeResponse(nodeId, shards.size(), results, exceptions)); - }); - - final var shardFailureListeners = new ListenableFuture(); - if (task instanceof CancellableTask cancellableTask) { - cancellableTask.addListener(() -> { - assert cancellableTask.isCancelled(); - resultListenerCompleter.run(); - cancellableTask.notifyIfCancelled(shardFailureListeners); - }); - } - try (var refs = new RefCountingRunnable(() -> { - resultListener.addListener(listener); - resultListenerCompleter.run(); - })) { - for (final var shardRouting : shards) { - if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { - return; + @Override + protected void onItemResponse(ShardRouting shardRouting, ShardOperationResult shardOperationResult) { + synchronized (results) { + results.add(shardOperationResult); } + } - final ActionListener shardListener = ActionListener.notifyOnce(new ActionListener<>() { - @Override - public void onResponse(ShardOperationResult shardOperationResult) { - logger.trace(() -> format("[%s] completed operation for shard [%s]", actionName, shardRouting.shortSummary())); - synchronized (results) { - results.add(shardOperationResult); - } - } - - 
@Override - public void onFailure(Exception e) { - if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { - return; - } - logger.log( - TransportActions.isShardNotAvailableException(e) ? Level.TRACE : Level.DEBUG, - () -> format("[%s] failed to execute operation for shard [%s]", actionName, shardRouting.shortSummary()), - e + @Override + protected void onItemFailure(ShardRouting shardRouting, Exception e) { + logger.log( + TransportActions.isShardNotAvailableException(e) ? Level.TRACE : Level.DEBUG, + () -> format("[%s] failed to execute operation for shard [%s]", actionName, shardRouting.shortSummary()), + e + ); + if (TransportActions.isShardNotAvailableException(e) == false) { + synchronized (exceptions) { + exceptions.add( + new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", e) ); - if (TransportActions.isShardNotAvailableException(e) == false) { - synchronized (exceptions) { - exceptions.add( - new BroadcastShardOperationFailedException( - shardRouting.shardId(), - "operation " + actionName + " failed", - e - ) - ); - } - } } - - @Override - public String toString() { - return "[" + actionName + "][" + shardRouting + "]"; - } - }); - - if (task instanceof CancellableTask) { - shardFailureListeners.addListener(shardListener); } + } - logger.trace(() -> format("[%s] executing operation for shard [%s]", actionName, shardRouting.shortSummary())); - ActionRunnable.wrap( - ActionListener.releaseAfter(shardListener, refs.acquire()), - l -> shardOperation(request, shardRouting, task, l) - ).run(); + @Override + protected NodeResponse onCompletion() { + // ref releases all happen-before here so no need to be synchronized + return new NodeResponse(nodeId, shards.size(), results, exceptions); } - } + + @Override + public String toString() { + return actionName; + } + }.run(task, shards.iterator(), listener); } class NodeRequest extends TransportRequest implements IndicesRequest { diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 5f805efe0c176..fedd357501ac1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -15,16 +15,15 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.CancellableFanOut; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.common.util.concurrent.RunOnce; -import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -131,84 +130,64 @@ protected void doExecute(Task task, NodesRequest request, 
ActionListener(request.concreteNodes().length); - final var exceptions = new ArrayList(0); + new CancellableFanOut, Exception>>() { - final var resultListener = new ListenableFuture(); - final var resultListenerCompleter = new RunOnce(() -> { - if (task instanceof CancellableTask cancellableTask) { - if (cancellableTask.notifyIfCancelled(resultListener)) { - return; - } - } - // ref releases all happen-before here so no need to be synchronized - threadPool.executor(finalExecutor) - .execute(ActionRunnable.wrap(resultListener, l -> newResponseAsync(task, request, responses, exceptions, l))); - }); - - final var nodeCancellationListener = new ListenableFuture(); // collects node listeners & completes them if cancelled - if (task instanceof CancellableTask cancellableTask) { - cancellableTask.addListener(() -> { - assert cancellableTask.isCancelled(); - resultListenerCompleter.run(); - cancellableTask.notifyIfCancelled(nodeCancellationListener); - }); - } - - final var transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); - - try (var refs = new RefCountingRunnable(() -> { - resultListener.addListener(listener); - resultListenerCompleter.run(); - })) { - for (final var node : request.concreteNodes()) { - final ActionListener nodeResponseListener = ActionListener.notifyOnce(new ActionListener<>() { - @Override - public void onResponse(NodeResponse nodeResponse) { - synchronized (responses) { - responses.add(nodeResponse); - } - } + final ArrayList responses = new ArrayList<>(request.concreteNodes().length); + final ArrayList exceptions = new ArrayList<>(0); - @Override - public void onFailure(Exception e) { - if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { - return; - } - - logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, node), e); - synchronized (exceptions) { - exceptions.add(new FailedNodeException(node.getId(), "Failed node [" + node.getId() + "]", e)); - } - } - - @Override - public String toString() { - return "[" + actionName + "][" + node.descriptionWithoutAttributes() + "]"; - } - }); - - if (task instanceof CancellableTask) { - nodeCancellationListener.addListener(nodeResponseListener); - } + final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); + @Override + protected void sendItemRequest(DiscoveryNode discoveryNode, ActionListener listener) { final var nodeRequest = newNodeRequest(request); if (task != null) { nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); } transportService.sendRequest( - node, + discoveryNode, transportNodeAction, nodeRequest, transportRequestOptions, - new ActionListenerResponseHandler<>( - ActionListener.releaseAfter(nodeResponseListener, refs.acquire()), - in -> newNodeResponse(in, node) - ) + new ActionListenerResponseHandler<>(listener, nodeResponseReader(discoveryNode)) ); } - } + + @Override + protected void onItemResponse(DiscoveryNode discoveryNode, NodeResponse nodeResponse) { + synchronized (responses) { + responses.add(nodeResponse); + } + } + + @Override + protected void onItemFailure(DiscoveryNode discoveryNode, Exception e) { + logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, discoveryNode), e); + synchronized (exceptions) { + exceptions.add(new FailedNodeException(discoveryNode.getId(), "Failed node [" + discoveryNode.getId() + "]", e)); + } + } + + @Override + protected CheckedConsumer, Exception> onCompletion() { + // ref releases all 
happen-before here so no need to be synchronized + return l -> newResponseAsync(task, request, responses, exceptions, l); + } + + @Override + public String toString() { + return actionName; + } + }.run( + task, + Iterators.forArray(request.concreteNodes()), + listener.delegateFailure((l, r) -> threadPool.executor(finalExecutor).execute(ActionRunnable.wrap(l, r))) + ); + } + + private Writeable.Reader nodeResponseReader(DiscoveryNode discoveryNode) { + // not an inline lambda to avoid capturing CancellableFanOut.this. + return in -> TransportNodesAction.this.newNodeResponse(in, discoveryNode); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index daedacf6fb4ad..81f8f575f528d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -10,40 +10,36 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.NoSuchNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.CancellableFanOut; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -import static java.util.Collections.emptyList; /** * The base class for transport actions that are interacting with currently running tasks. 
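Both rewrites above, and the tasks-action rewrite that follows, move the same hand-rolled listener bookkeeping onto CancellableFanOut. To make the contract concrete, here is a minimal, self-contained sketch of a subclass. It is an illustration only, not code from this change: the item type, the inline completion, and the counting logic are all invented.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.CancellableFanOut;

import java.util.Locale;
import java.util.concurrent.atomic.AtomicInteger;

// Fans out over a list of names, "processing" each one inline and counting the successes.
class UppercaseCountFanOut extends CancellableFanOut<String, String, Integer> {

    private final AtomicInteger successes = new AtomicInteger();

    @Override
    protected void sendItemRequest(String item, ActionListener<String> listener) {
        // Called in sequence on the thread that invoked run(). Completing the listener inline
        // keeps this sketch runnable; a real implementation hands it to an async sub-action.
        if (item.isEmpty()) {
            listener.onFailure(new IllegalArgumentException("empty item"));
        } else {
            listener.onResponse(item.toUpperCase(Locale.ROOT));
        }
    }

    @Override
    protected void onItemResponse(String item, String itemResponse) {
        successes.incrementAndGet(); // may be called concurrently for different items
    }

    @Override
    protected void onItemFailure(String item, Exception e) {
        // a single failed item does not fail the whole fan-out
    }

    @Override
    protected Integer onCompletion() {
        return successes.get(); // runs once, after every per-item listener has completed
    }

    @Override
    public String toString() {
        return "uppercase_count_fan_out"; // a plain name; avoid capturing extra references here
    }
}

Passing a null task is explicitly permitted by run() and simply disables cancellation handling, so new UppercaseCountFanOut().run(null, List.of("a", "", "b").iterator(), listener) completes the final listener with 2.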
@@ -85,67 +81,113 @@ protected TransportTasksAction( @Override protected void doExecute(Task task, TasksRequest request, ActionListener listener) { - new AsyncAction(task, request, listener).start(); - } + final var discoveryNodes = clusterService.state().nodes(); + final String[] nodeIds = resolveNodes(request, discoveryNodes); + + new CancellableFanOut() { + final ArrayList taskResponses = new ArrayList<>(); + final ArrayList taskOperationFailures = new ArrayList<>(); + final ArrayList failedNodeExceptions = new ArrayList<>(); + final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.getTimeout()); + + @Override + protected void sendItemRequest(String nodeId, ActionListener listener) { + final var discoveryNode = discoveryNodes.get(nodeId); + if (discoveryNode == null) { + listener.onFailure(new NoSuchNodeException(nodeId)); + return; + } + + transportService.sendChildRequest( + discoveryNode, + transportNodeAction, + new NodeTaskRequest(request), + task, + transportRequestOptions, + new ActionListenerResponseHandler<>(listener, nodeResponseReader) + ); + } + + @Override + protected void onItemResponse(String nodeId, NodeTasksResponse nodeTasksResponse) { + addAllSynchronized(taskResponses, nodeTasksResponse.results); + addAllSynchronized(taskOperationFailures, nodeTasksResponse.exceptions); + } + + @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") + private static void addAllSynchronized(List allResults, Collection response) { + if (response.isEmpty() == false) { + synchronized (allResults) { + allResults.addAll(response); + } + } + } + + @Override + protected void onItemFailure(String nodeId, Exception e) { + logger.debug(() -> Strings.format("failed to execute on node [%s]", nodeId), e); + synchronized (failedNodeExceptions) { + failedNodeExceptions.add(new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", e)); + } + } + + @Override + protected TasksResponse onCompletion() { + // ref releases all happen-before here so no need to be synchronized + return newResponse(request, taskResponses, taskOperationFailures, failedNodeExceptions); + } - private void nodeOperation(CancellableTask task, NodeTaskRequest nodeTaskRequest, ActionListener listener) { - TasksRequest request = nodeTaskRequest.tasksRequest; - processTasks(request, ActionListener.wrap(tasks -> nodeOperation(task, listener, request, tasks), listener::onFailure)); + @Override + public String toString() { + return actionName; + } + }.run(task, Iterators.forArray(nodeIds), listener); } + // not an inline method reference to avoid capturing CancellableFanOut.this. + private final Writeable.Reader nodeResponseReader = NodeTasksResponse::new; + private void nodeOperation( - CancellableTask task, + CancellableTask nodeTask, ActionListener listener, TasksRequest request, - List tasks + List operationTasks ) { - if (tasks.isEmpty()) { - listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), emptyList(), emptyList())); - return; - } - AtomicArray> responses = new AtomicArray<>(tasks.size()); - final AtomicInteger counter = new AtomicInteger(tasks.size()); - for (int i = 0; i < tasks.size(); i++) { - final int taskIndex = i; - ActionListener taskListener = new ActionListener() { - @Override - public void onResponse(TaskResponse response) { - responses.setOnce(taskIndex, response == null ?
null : new Tuple<>(response, null)); - respondIfFinished(); - } + new CancellableFanOut() { - @Override - public void onFailure(Exception e) { - responses.setOnce(taskIndex, new Tuple<>(null, e)); - respondIfFinished(); + final ArrayList results = new ArrayList<>(operationTasks.size()); + final ArrayList exceptions = new ArrayList<>(); + + @Override + protected void sendItemRequest(OperationTask operationTask, ActionListener listener) { + ActionListener.run(listener, l -> taskOperation(nodeTask, request, operationTask, l)); + } + + @Override + protected void onItemResponse(OperationTask operationTask, TaskResponse taskResponse) { + synchronized (results) { + results.add(taskResponse); } + } - private void respondIfFinished() { - if (counter.decrementAndGet() != 0) { - return; - } - List results = new ArrayList<>(); - List exceptions = new ArrayList<>(); - for (Tuple response : responses.asList()) { - if (response.v1() == null) { - assert response.v2() != null; - exceptions.add( - new TaskOperationFailure(clusterService.localNode().getId(), tasks.get(taskIndex).getId(), response.v2()) - ); - } else { - assert response.v2() == null; - results.add(response.v1()); - } - } - listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions)); + @Override + protected void onItemFailure(OperationTask operationTask, Exception e) { + synchronized (exceptions) { + exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), operationTask.getId(), e)); } - }; - try { - taskOperation(task, request, tasks.get(taskIndex), taskListener); - } catch (Exception e) { - taskListener.onFailure(e); } - } + + @Override + protected NodeTasksResponse onCompletion() { + // ref releases all happen-before here so no need to be synchronized + return new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions); + } + + @Override + public String toString() { + return transportNodeAction; + } + }.run(nodeTask, operationTasks.iterator(), listener); } protected String[] resolveNodes(TasksRequest request, DiscoveryNodes discoveryNodes) { @@ -156,7 +198,7 @@ protected String[] resolveNodes(TasksRequest request, DiscoveryNodes discoveryNo } } - protected void processTasks(TasksRequest request, ActionListener> nodeOperation) { + protected void processTasks(CancellableTask nodeTask, TasksRequest request, ActionListener> nodeOperation) { nodeOperation.onResponse(processTasks(request)); } @@ -192,28 +234,6 @@ protected abstract TasksResponse newResponse( List failedNodeExceptions ); - @SuppressWarnings("unchecked") - protected TasksResponse newResponse(TasksRequest request, AtomicReferenceArray responses) { - List tasks = new ArrayList<>(); - List failedNodeExceptions = new ArrayList<>(); - List taskOperationFailures = new ArrayList<>(); - for (int i = 0; i < responses.length(); i++) { - Object response = responses.get(i); - if (response instanceof FailedNodeException) { - failedNodeExceptions.add((FailedNodeException) response); - } else { - NodeTasksResponse tasksResponse = (NodeTasksResponse) response; - if (tasksResponse.results != null) { - tasks.addAll(tasksResponse.results); - } - if (tasksResponse.exceptions != null) { - taskOperationFailures.addAll(tasksResponse.exceptions); - } - } - } - return newResponse(request, tasks, taskOperationFailures, failedNodeExceptions); - } - /** * Perform the required operation on the task. It is OK to start an asynchronous operation or to throw an exception, but not both. * @param actionTask The related transport action task.
Can be used to create a task ID to handle upstream transport cancellations. @@ -228,120 +248,19 @@ protected abstract void taskOperation( ActionListener listener ); - private class AsyncAction { - - private final TasksRequest request; - private final String[] nodesIds; - private final DiscoveryNode[] nodes; - private final ActionListener listener; - private final AtomicReferenceArray responses; - private final AtomicInteger counter = new AtomicInteger(); - private final Task task; - - private AsyncAction(Task task, TasksRequest request, ActionListener listener) { - this.task = task; - this.request = request; - this.listener = listener; - final DiscoveryNodes discoveryNodes = clusterService.state().nodes(); - this.nodesIds = resolveNodes(request, discoveryNodes); - Map nodes = discoveryNodes.getNodes(); - this.nodes = new DiscoveryNode[nodesIds.length]; - for (int i = 0; i < this.nodesIds.length; i++) { - this.nodes[i] = nodes.get(this.nodesIds[i]); - } - this.responses = new AtomicReferenceArray<>(this.nodesIds.length); - } - - private void start() { - if (nodesIds.length == 0) { - // nothing to do - try { - listener.onResponse(newResponse(request, responses)); - } catch (Exception e) { - logger.debug("failed to generate empty response", e); - listener.onFailure(e); - } - } else { - final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.getTimeout()); - for (int i = 0; i < nodesIds.length; i++) { - final String nodeId = nodesIds[i]; - final int idx = i; - final DiscoveryNode node = nodes[i]; - try { - if (node == null) { - onFailure(idx, nodeId, new NoSuchNodeException(nodeId)); - } else { - NodeTaskRequest nodeRequest = new NodeTaskRequest(request); - nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); - transportService.sendRequest( - node, - transportNodeAction, - nodeRequest, - transportRequestOptions, - new TransportResponseHandler() { - @Override - public NodeTasksResponse read(StreamInput in) throws IOException { - return new NodeTasksResponse(in); - } - - @Override - public void handleResponse(NodeTasksResponse response) { - onOperation(idx, response); - } - - @Override - public void handleException(TransportException exp) { - onFailure(idx, node.getId(), exp); - } - } - ); - } - } catch (Exception e) { - onFailure(idx, nodeId, e); - } - } - } - } - - private void onOperation(int idx, NodeTasksResponse nodeResponse) { - responses.set(idx, nodeResponse); - if (counter.incrementAndGet() == responses.length()) { - finishHim(); - } - } - - private void onFailure(int idx, String nodeId, Throwable t) { - logger.debug(() -> "failed to execute on node [" + nodeId + "]", t); - - responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); - - if (counter.incrementAndGet() == responses.length()) { - finishHim(); - } - } - - private void finishHim() { - if ((task instanceof CancellableTask t) && t.notifyIfCancelled(listener)) { - return; - } - TasksResponse finalResponse; - try { - finalResponse = newResponse(request, responses); - } catch (Exception e) { - logger.debug("failed to combine responses from nodes", e); - listener.onFailure(e); - return; - } - listener.onResponse(finalResponse); - } - } - class NodeTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception { assert task instanceof CancellableTask; - nodeOperation((CancellableTask) task, request, new 
ChannelActionListener<>(channel)); + TasksRequest tasksRequest = request.tasksRequest; + processTasks( + (CancellableTask) task, + tasksRequest, + new ChannelActionListener(channel).delegateFailure( + (l, tasks) -> nodeOperation((CancellableTask) task, l, tasksRequest, tasks) + ) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/GetSynonymsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/GetSynonymsAction.java new file mode 100644 index 0000000000000..77b2ed7e01526 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/synonyms/GetSynonymsAction.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.synonyms; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.synonyms.SynonymRule; +import org.elasticsearch.synonyms.SynonymsManagementAPIService; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class GetSynonymsAction extends ActionType { + + public static final GetSynonymsAction INSTANCE = new GetSynonymsAction(); + public static final String NAME = "cluster:admin/synonyms/get"; + + public GetSynonymsAction() { + super(NAME, Response::new); + } + + public static class Request extends ActionRequest { + private final String SynonymsSetId; + private final int from; + private final int size; + + public Request(StreamInput in) throws IOException { + super(in); + this.SynonymsSetId = in.readString(); + this.from = in.readInt(); + this.size = in.readInt(); + } + + public Request(String SynonymsSetId, int from, int size) { + Objects.requireNonNull(SynonymsSetId, "Synonym set ID cannot be null"); + this.SynonymsSetId = SynonymsSetId; + this.from = from; + this.size = size; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (from < 0) { + validationException = addValidationError("from must be a positive integer", validationException); + } + if (size < 0) { + validationException = addValidationError("size must be a positive integer", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(SynonymsSetId); + out.writeInt(from); + out.writeInt(size); + } + + public String synonymsSetId() { + return SynonymsSetId; + } + + public int from() { + return from; + } + + public int size() { + return size; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return from == request.from && size == request.size && Objects.equals(SynonymsSetId, request.SynonymsSetId); + } + + 
@Override + public int hashCode() { + return Objects.hash(synonymsSetId, from, size); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final SynonymsManagementAPIService.SynonymsSetResult synonymsSetResults; + + public Response(StreamInput in) throws IOException { + super(in); + this.synonymsSetResults = new SynonymsManagementAPIService.SynonymsSetResult( + in.readLong(), + in.readArray(SynonymRule::new, SynonymRule[]::new) + ); + } + + public Response(SynonymsManagementAPIService.SynonymsSetResult synonymsSetResult) { + super(); + Objects.requireNonNull(synonymsSetResult, "Synonyms set result must not be null"); + this.synonymsSetResults = synonymsSetResult; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("count", synonymsSetResults.totalSynonymRules()); + builder.array(SynonymsManagementAPIService.SYNONYMS_SET_FIELD, (Object[]) synonymsSetResults.synonymRules()); + } + builder.endObject(); + + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(synonymsSetResults.totalSynonymRules()); + out.writeArray(synonymsSetResults.synonymRules()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(this.synonymsSetResults, response.synonymsSetResults); + } + + @Override + public int hashCode() { + return Objects.hash(synonymsSetResults); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/PutSynonymsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/PutSynonymsAction.java index caba653477dcc..5ba32759ceed8 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/PutSynonymsAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/PutSynonymsAction.java @@ -20,13 +20,18 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.synonyms.SynonymsSet; +import org.elasticsearch.synonyms.SynonymRule; +import org.elasticsearch.synonyms.SynonymsManagementAPIService; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.Locale; import java.util.Objects; @@ -40,31 +45,40 @@ public PutSynonymsAction() { } public static class Request extends ActionRequest { - private final String synonymssetId; - private final SynonymsSet synonymsset; + private final String synonymsSetId; + private final SynonymRule[] synonymRules; + + public static final ParseField SYNONYMS_SET_FIELD = new ParseField(SynonymsManagementAPIService.SYNONYMS_SET_FIELD); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("synonyms_set", args -> { + @SuppressWarnings("unchecked") + final List synonyms = (List) args[0]; + return synonyms.toArray(new SynonymRule[synonyms.size()]); + }); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) ->
SynonymRule.fromXContent(p), SYNONYMS_SET_FIELD); + } public Request(StreamInput in) throws IOException { super(in); - this.synonymssetId = in.readString(); - this.synonymsset = new SynonymsSet(in); + this.synonymsSetId = in.readString(); + this.synonymRules = in.readArray(SynonymRule::new, SynonymRule[]::new); } - public Request(String synonymssetId, BytesReference content, XContentType contentType) throws IOException { - this.synonymssetId = synonymssetId; - this.synonymsset = SynonymsSet.fromXContent( - XContentHelper.createParser(XContentParserConfiguration.EMPTY, content, contentType) - ); + public Request(String synonymsSetId, BytesReference content, XContentType contentType) throws IOException { + this.synonymsSetId = synonymsSetId; + this.synonymRules = PARSER.apply(XContentHelper.createParser(XContentParserConfiguration.EMPTY, content, contentType), null); } - Request(String synonymssetId, SynonymsSet synonymsset) { - this.synonymssetId = synonymssetId; - this.synonymsset = synonymsset; + Request(String synonymsSetId, SynonymRule[] synonymRules) { + this.synonymsSetId = synonymsSetId; + this.synonymRules = synonymRules; } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (Strings.isEmpty(synonymssetId)) { + if (Strings.isEmpty(synonymsSetId)) { validationException = ValidateActions.addValidationError("synonyms set must be specified", validationException); } @@ -75,16 +89,16 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(synonymssetId); - synonymsset.writeTo(out); + out.writeString(synonymsSetId); + out.writeArray(synonymRules); } public String synonymsSetId() { - return synonymssetId; + return synonymsSetId; } - public SynonymsSet synonymsset() { - return synonymsset; + public SynonymRule[] synonymRules() { + return synonymRules; } @Override @@ -92,25 +106,25 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return Objects.equals(synonymssetId, request.synonymssetId) && Objects.equals(synonymsset, request.synonymsset); + return Objects.equals(synonymsSetId, request.synonymsSetId) && Arrays.equals(synonymRules, request.synonymRules); } @Override public int hashCode() { - return Objects.hash(synonymssetId, synonymsset); + return Objects.hash(synonymsSetId, Arrays.hashCode(synonymRules)); } } public static class Response extends ActionResponse implements StatusToXContentObject { - private final Result result; + private final SynonymsManagementAPIService.UpdateSynonymsResult result; public Response(StreamInput in) throws IOException { super(in); - this.result = in.readEnum((Result.class)); + this.result = in.readEnum((SynonymsManagementAPIService.UpdateSynonymsResult.class)); } - public Response(Result result) { + public Response(SynonymsManagementAPIService.UpdateSynonymsResult result) { super(); Objects.requireNonNull(result, "Result must not be null"); this.result = result; @@ -150,10 +164,5 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(result); } - - public enum Result { - CREATED, - UPDATED - } } } diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java new file mode 100644 index 0000000000000..cfee7335db5e6 --- 
/dev/null +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.synonyms; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.synonyms.SynonymsManagementAPIService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +public class TransportGetSynonymsAction extends HandledTransportAction { + + private final SynonymsManagementAPIService synonymsManagementAPIService; + + @Inject + public TransportGetSynonymsAction(TransportService transportService, ActionFilters actionFilters, Client client) { + super(GetSynonymsAction.NAME, transportService, actionFilters, GetSynonymsAction.Request::new); + + this.synonymsManagementAPIService = new SynonymsManagementAPIService(client); + } + + @Override + protected void doExecute(Task task, GetSynonymsAction.Request request, ActionListener listener) { + synonymsManagementAPIService.getSynonymsSet( + request.synonymsSetId(), + request.from(), + request.size(), + listener.map(GetSynonymsAction.Response::new) + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java index d7eea65a6b3c9..28682ec7ec384 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java @@ -30,6 +30,10 @@ public TransportPutSynonymsAction(TransportService transportService, ActionFilte @Override protected void doExecute(Task task, PutSynonymsAction.Request request, ActionListener listener) { - synonymsManagementAPIService.putSynonymsset(request.synonymsSetId(), request.synonymsset(), listener); + synonymsManagementAPIService.putSynonymsSet( + request.synonymsSetId(), + request.synonymRules(), + listener.map(PutSynonymsAction.Response::new) + ); } } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 9fbfad8423b75..62a1f1f37c5eb 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; @@ -181,6 +182,8 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { try { // ReferenceDocs class does nontrivial static initialization which should always succeed but load it now (before SM) to be sure 
MethodHandles.publicLookup().ensureInitialized(ReferenceDocs.class); + // AbstractRefCounted class uses MethodHandles.lookup during initialization; load it now (before SM) to be sure it succeeds + MethodHandles.publicLookup().ensureInitialized(AbstractRefCounted.class); } catch (IllegalAccessException unexpected) { throw new AssertionError(unexpected); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 552947fd3e92d..99b075754a008 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1264,8 +1264,8 @@ public Map dataStreamAliases() { return this.custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY).getDataStreamAliases(); } - public Map nodeShutdowns() { - return this.custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY).getAllNodeMetadataMap(); + public NodesShutdownMetadata nodeShutdowns() { + return custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 088c644c6fddd..f08d1d82017a8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -934,7 +934,7 @@ public static ClusterState innerRemoveIndexTemplateV2(ClusterState currentState, } } if (templateNames.isEmpty()) { - // if its a match all pattern, and no templates are found (we have none), don't + // if it's a match all pattern, and no templates are found (we have none), don't // fail with index missing... boolean isMatchAll = false; if (Regex.isMatchAllPattern(name)) { @@ -948,7 +948,7 @@ public static ClusterState innerRemoveIndexTemplateV2(ClusterState currentState, } } - Set dataStreamsUsingTemplates = dataStreamsUsingTemplates(currentState, templateNames); + Set dataStreamsUsingTemplates = dataStreamsExclusivelyUsingTemplates(currentState, templateNames); if (dataStreamsUsingTemplates.size() > 0) { throw new IllegalArgumentException( "unable to remove composable templates " @@ -966,7 +966,12 @@ public static ClusterState innerRemoveIndexTemplateV2(ClusterState currentState, return ClusterState.builder(currentState).metadata(metadata).build(); } - static Set dataStreamsUsingTemplates(final ClusterState state, final Set templateNames) { + /** + * Returns the data stream names that solely match the patterns of the template names that were provided and no + * other templates. This means that the returned data streams depend on these templates, which has implications for + * the templates themselves: for example, they cannot be removed.
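// A self-contained sketch (plain JDK 15+, not part of this diff) of the
// ensureInitialized trick used in the bootstrap code above: forcing a class's
// static initializer to run eagerly. The Eager class is invented for illustration.
import java.lang.invoke.MethodHandles;

class EnsureInitializedSketch {
    static class Eager {
        static {
            System.out.println("Eager initialized");
        }
    }

    public static void main(String[] args) throws IllegalAccessException {
        // Runs Eager's static initializer now rather than on first use.
        MethodHandles.lookup().ensureInitialized(Eager.class);
    }
}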
+ */ + static Set dataStreamsExclusivelyUsingTemplates(final ClusterState state, final Set templateNames) { Metadata metadata = state.metadata(); Set namePatterns = templateNames.stream() @@ -983,10 +988,22 @@ static Set dataStreamsUsingTemplates(final ClusterState state, final Set // Limit to checking data streams that match any of the templates' index patterns .filter(ds -> namePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, ds.getName()))) .filter(ds -> { - // Retrieve the template that matches the data stream name that has the highest priority - String matchedTemplate = findV2Template(metadata, ds.getName(), ds.isHidden()); - // Limit data streams where their in-use template is the one of specified templates - return templateNames.contains(matchedTemplate); + // Retrieve the templates that match the data stream name ordered by priority + List> candidates = findV2CandidateTemplates(metadata, ds.getName(), ds.isHidden()); + if (candidates.isEmpty()) { + throw new IllegalStateException("Data stream " + ds.getName() + " did not match any composable index templates."); + } + + // Limit to data streams that can ONLY use the specified templates. We check this by filtering out + // the matching templates, other than the requested ones, that could still be valid templates to use. + return candidates.stream() + .filter( + template -> templateNames.contains(template.v1()) == false + && isGlobalAndHasIndexHiddenSetting(metadata, template.v2(), template.v1()) == false + ) + .map(Tuple::v1) + .toList() + .isEmpty(); }) .map(DataStream::getName) .collect(Collectors.toSet()); @@ -1183,54 +1200,67 @@ public static List findV1Templates(Metadata metadata, Str */ @Nullable public static String findV2Template(Metadata metadata, String indexName, boolean isHidden) { + final List> candidates = findV2CandidateTemplates(metadata, indexName, isHidden); + if (candidates.isEmpty()) { + return null; + } + + ComposableIndexTemplate winner = candidates.get(0).v2(); + String winnerName = candidates.get(0).v1(); + + // if the winner template is a global template that specifies the `index.hidden` setting (which is not allowed, so it'd be due to + // a restored index cluster state that modified a component template used by this global template such that it has this setting) + // we will fail and the user will have to update the index template and remove this setting or update the corresponding component + // template that contributes to the index template resolved settings + if (isGlobalAndHasIndexHiddenSetting(metadata, winner, winnerName)) { + throw new IllegalStateException( + "global index template [" + + winnerName + + "], composed of component templates [" + + String.join(",", winner.composedOf()) + + "] defined the index.hidden setting, which is not allowed" + ); + } + + return winnerName; + } + + /** + * Return an ordered list of the name (id) and composable index templates that would apply to an index. The first + * one is the winner template that is applied to this index. In the event that no templates are matched, + * an empty list is returned.
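// An illustrative, self-contained analogue (plain JDK, invented names; not the
// actual Elasticsearch types) of how findV2CandidateTemplates orders matching
// templates: descending by priority, with the first candidate as the winner.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class TemplatePrioritySketch {
    record Candidate(String name, long priority) {}

    public static void main(String[] args) {
        List<Candidate> candidates = new ArrayList<>(
            List.of(new Candidate("logs-default", 100), new Candidate("logs-custom", 200))
        );
        // Highest priority first, mirroring the timSort with a reversed comparator.
        candidates.sort(Comparator.comparing(Candidate::priority, Comparator.reverseOrder()));
        System.out.println("winner: " + candidates.get(0).name()); // prints "winner: logs-custom"
    }
}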
+ */ + static List> findV2CandidateTemplates(Metadata metadata, String indexName, boolean isHidden) { final String resolvedIndexName = IndexNameExpressionResolver.DateMathExpressionResolver.resolveExpression(indexName); final Predicate patternMatchPredicate = pattern -> Regex.simpleMatch(pattern, resolvedIndexName); - final Map matchedTemplates = new HashMap<>(); + final List> candidates = new ArrayList<>(); for (Map.Entry entry : metadata.templatesV2().entrySet()) { final String name = entry.getKey(); final ComposableIndexTemplate template = entry.getValue(); if (isHidden == false) { final boolean matched = template.indexPatterns().stream().anyMatch(patternMatchPredicate); if (matched) { - matchedTemplates.put(template, name); + candidates.add(Tuple.tuple(name, template)); } } else { final boolean isNotMatchAllTemplate = template.indexPatterns().stream().noneMatch(Regex::isMatchAllPattern); if (isNotMatchAllTemplate) { if (template.indexPatterns().stream().anyMatch(patternMatchPredicate)) { - matchedTemplates.put(template, name); + candidates.add(Tuple.tuple(name, template)); } } } } - if (matchedTemplates.size() == 0) { - return null; - } - - final List candidates = new ArrayList<>(matchedTemplates.keySet()); - CollectionUtil.timSort(candidates, Comparator.comparing(ComposableIndexTemplate::priorityOrZero, Comparator.reverseOrder())); - - assert candidates.size() > 0 : "we should have returned early with no candidates"; - ComposableIndexTemplate winner = candidates.get(0); - String winnerName = matchedTemplates.get(winner); - - // if the winner template is a global template that specifies the `index.hidden` setting (which is not allowed, so it'd be due to - // a restored index cluster state that modified a component template used by this global template such that it has this setting) - // we will fail and the user will have to update the index template and remove this setting or update the corresponding component - // template that contributes to the index template resolved settings - if (winner.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern) - && IndexMetadata.INDEX_HIDDEN_SETTING.exists(resolveSettings(metadata, winnerName))) { - throw new IllegalStateException( - "global index template [" - + winnerName - + "], composed of component templates [" - + String.join(",", winner.composedOf()) - + "] defined the index.hidden setting, which is not allowed" - ); - } + CollectionUtil.timSort(candidates, Comparator.comparing(candidate -> candidate.v2().priorityOrZero(), Comparator.reverseOrder())); + return candidates; + } - return winnerName; + // Checks if a global template specifies the `index.hidden` setting. This check is important because a global + // template shouldn't specify the `index.hidden` setting, we leave it up to the caller to handle this situation. 
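// A toy, self-contained illustration (invented names) of the exclusivity rule in
// dataStreamsExclusivelyUsingTemplates above: a data stream blocks template removal
// only when no other matching template remains that could serve it.
import java.util.List;
import java.util.Set;

class ExclusiveTemplateUseSketch {
    public static void main(String[] args) {
        Set<String> templatesToRemove = Set.of("logs-template");
        List<String> matchingTemplates = List.of("logs-template", "logs-fallback");
        boolean exclusivelyUsing = matchingTemplates.stream()
            .filter(name -> templatesToRemove.contains(name) == false)
            .toList()
            .isEmpty();
        // false: "logs-fallback" could still serve the data stream, so removal is allowed
        System.out.println(exclusivelyUsing);
    }
}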
+ private static boolean isGlobalAndHasIndexHiddenSetting(Metadata metadata, ComposableIndexTemplate template, String templateName) { + return template.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern) + && IndexMetadata.INDEX_HIDDEN_SETTING.exists(resolveSettings(metadata, templateName)); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java index aefefdcbc5941..5222eb1605591 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -17,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -29,7 +29,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; +import java.util.Set; import java.util.TreeMap; import java.util.function.Function; import java.util.stream.Collectors; @@ -67,27 +67,6 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOE return new NodeShutdownMetadataDiff(in); } - public static Optional getShutdowns(final ClusterState state) { - assert state != null : "cluster state should never be null"; - return Optional.of(state).map(ClusterState::metadata).map(m -> m.custom(TYPE)); - } - - /** - * Returns true if the given node is marked as shutting down with any - * shutdown type. - */ - public static boolean isNodeShuttingDown(final ClusterState state, final String nodeId) { - // Right now we make no distinction between the type of shutdown, but maybe in the future we might? - return NodesShutdownMetadata.getShutdowns(state) - .map(NodesShutdownMetadata::getAllNodeMetadataMap) - .map(allNodes -> allNodes.get(nodeId)) - .isPresent(); - } - - public static NodesShutdownMetadata getShutdownsOrEmpty(final ClusterState state) { - return getShutdowns(state).orElse(EMPTY); - } - private final Map nodes; public NodesShutdownMetadata(Map nodes) { @@ -106,10 +85,45 @@ public void writeTo(StreamOutput out) throws IOException { /** * @return A map of NodeID to shutdown metadata. */ - public Map getAllNodeMetadataMap() { + public Map getAll() { return nodes; } + /** + * @return a set of all node ids that might be restarting or shutting down + */ + public Set getAllNodeIds() { + return nodes.keySet(); + } + + /** + * @return a shutdown entry for the node if one exists, with any shutdown type + */ + @Nullable + public SingleNodeShutdownMetadata get(String nodeId) { + return nodes.get(nodeId); + } + + /** + * Returns true if the given node is marked as shutting down with any shutdown type.
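// A small sketch (not part of this diff) of the new typed accessors replacing
// getAllNodeMetadataMap() lookups at call sites; assumes the Elasticsearch server
// classpath, and the helper methods here are hypothetical.
import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;

class ShutdownLookupSketch {
    // True only when the node has a shutdown entry of type RESTART.
    static boolean isRestarting(NodesShutdownMetadata shutdowns, String nodeId) {
        return shutdowns.contains(nodeId, SingleNodeShutdownMetadata.Type.RESTART);
    }

    // Any shutdown type counts, mirroring the removed isNodeShuttingDown helper.
    static boolean isShuttingDown(NodesShutdownMetadata shutdowns, String nodeId) {
        return shutdowns.contains(nodeId);
    }
}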
+ */ + public boolean contains(String nodeId) { + return get(nodeId) != null; + } + + /** + * @return a shutdown entry for the node if one exists and it matches the supplied type + */ + @Nullable + public SingleNodeShutdownMetadata get(String nodeId, SingleNodeShutdownMetadata.Type type) { + var shutdown = get(nodeId); + return shutdown != null && shutdown.getType() == type ? shutdown : null; + } + + public boolean contains(String nodeId, SingleNodeShutdownMetadata.Type type) { + return get(nodeId, type) != null; + } + /** * Add or update the shutdown metadata for a single node. * @param nodeShutdownMetadata The single node shutdown metadata to add or update. @@ -216,5 +230,4 @@ public TransportVersion getMinimalSupportedVersion() { } } - } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index 874f6cf07c376..df8e5bb0199f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -459,17 +459,13 @@ public enum Type { SIGTERM; // locally-initiated version of REMOVE public static Type parse(String type) { - if ("remove".equals(type.toLowerCase(Locale.ROOT))) { - return REMOVE; - } else if ("restart".equals(type.toLowerCase(Locale.ROOT))) { - return RESTART; - } else if ("replace".equals(type.toLowerCase(Locale.ROOT))) { - return REPLACE; - } else if ("sigterm".equals(type.toLowerCase(Locale.ROOT))) { - return SIGTERM; - } else { - throw new IllegalArgumentException("unknown shutdown type: " + type); - } + return switch (type.toLowerCase(Locale.ROOT)) { + case "remove" -> REMOVE; + case "restart" -> RESTART; + case "replace" -> REPLACE; + case "sigterm" -> SIGTERM; + default -> throw new IllegalArgumentException("unknown shutdown type: " + type); + }; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 3b3a64a1d2e1b..48cfa6626197d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -54,6 +54,7 @@ void setUseAdaptiveReplicaSelection(boolean useAdaptiveReplicaSelection) { /** * Shards to use for a {@code GET} operation. + * @return A shard iterator that can be used for GETs, or null if no match is found, e.g. due to preferences.
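// A brief sketch (not part of this diff) of the refactored Type.parse behavior:
// case-insensitive via Locale.ROOT, with unknown values rejected. Assumes the
// Elasticsearch server classpath; run with assertions enabled (-ea).
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;

class ParseShutdownTypeSketch {
    public static void main(String[] args) {
        assert SingleNodeShutdownMetadata.Type.parse("ReStArT") == SingleNodeShutdownMetadata.Type.RESTART;
        try {
            SingleNodeShutdownMetadata.Type.parse("pause"); // not a recognized shutdown type
            assert false : "expected IllegalArgumentException";
        } catch (IllegalArgumentException expected) {
            // message: "unknown shutdown type: pause"
        }
    }
}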
*/ public ShardIterator getShards( ClusterState clusterState, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 8b02cc6e83662..9902d7605cded 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -33,7 +34,6 @@ import java.util.Collections; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; @@ -430,17 +430,12 @@ public Set getFailedNodeIds() { * * @return calculated delay in nanoseconds */ - public long getRemainingDelay( - final long nanoTimeNow, - final Settings indexSettings, - final Map nodesShutdownMap - ) { + public long getRemainingDelay(final long nanoTimeNow, final Settings indexSettings, final NodesShutdownMetadata nodesShutdownMetadata) { final long indexLevelDelay = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos(); long delayTimeoutNanos = Optional.ofNullable(lastAllocatedNodeId) // If the node wasn't restarting when this became unassigned, use default delay .filter(nodeId -> reason.equals(Reason.NODE_RESTARTING)) - .map(nodesShutdownMap::get) - .filter(shutdownMetadata -> SingleNodeShutdownMetadata.Type.RESTART.equals(shutdownMetadata.getType())) + .map(nodeId -> nodesShutdownMetadata.get(nodeId, SingleNodeShutdownMetadata.Type.RESTART)) .map(SingleNodeShutdownMetadata::getAllocationDelay) .map(TimeValue::nanos) .map(knownRestartDelay -> Math.max(indexLevelDelay, knownRestartDelay)) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 53c5372a380c0..91b77b79f8df6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -51,7 +51,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.function.Function; import java.util.function.Supplier; @@ -161,11 +160,13 @@ private static ClusterState buildResultAndLogHealthChange(ClusterState oldState, assert newRoutingTable.validate(newMetadata); // validates the routing table is coherent with the cluster state metadata final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState).routingTable(newRoutingTable).metadata(newMetadata); - final RestoreInProgress restoreInProgress = allocation.custom(RestoreInProgress.TYPE); + final RestoreInProgress restoreInProgress = allocation.getClusterState().custom(RestoreInProgress.TYPE); if (restoreInProgress != null) { RestoreInProgress updatedRestoreInProgress = allocation.updateRestoreInfoWithRoutingChanges(restoreInProgress); if (updatedRestoreInProgress != restoreInProgress) { - ImmutableOpenMap.Builder customsBuilder = 
ImmutableOpenMap.builder(allocation.getCustoms()); + ImmutableOpenMap.Builder customsBuilder = ImmutableOpenMap.builder( + allocation.getClusterState().getCustoms() + ); customsBuilder.put(RestoreInProgress.TYPE, updatedRestoreInProgress); newStateBuilder.customs(customsBuilder.build()); } @@ -566,13 +567,9 @@ private static void disassociateDeadNodes(RoutingAllocation allocation) { continue; } - var nodeShutdownMetadata = allocation.metadata().nodeShutdowns().get(node.nodeId()); - var unassignedReason = nodeShutdownMetadata != null && Objects.equals(nodeShutdownMetadata.getType(), Type.RESTART) - ? UnassignedInfo.Reason.NODE_RESTARTING - : UnassignedInfo.Reason.NODE_LEFT; - boolean delayedDueToKnownRestart = nodeShutdownMetadata != null - && Objects.equals(nodeShutdownMetadata.getType(), Type.RESTART) - && nodeShutdownMetadata.getAllocationDelay().nanos() > 0; + var nodeShutdownMetadata = allocation.metadata().nodeShutdowns().get(node.nodeId(), Type.RESTART); + var unassignedReason = nodeShutdownMetadata != null ? UnassignedInfo.Reason.NODE_RESTARTING : UnassignedInfo.Reason.NODE_LEFT; + boolean delayedDueToKnownRestart = nodeShutdownMetadata != null && nodeShutdownMetadata.getAllocationDelay().nanos() > 0; // now, go over all the shards routing on the node, and fail them for (ShardRouting shardRouting : node.copyShards()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index 57473dbb86099..449aac6e76f41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -373,6 +373,7 @@ public void onNewInfo(ClusterInfo info) { // Calculate both the source node id and the target node id of a "replace" type shutdown final Set nodesIdsPartOfReplacement = state.metadata() .nodeShutdowns() + .getAll() .values() .stream() .filter(meta -> meta.getType() == SingleNodeShutdownMetadata.Type.REPLACE) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 9e0f39e3b2ce6..ed6b2af2fb55d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -147,7 +147,7 @@ private RoutingAllocation( private static Map nodeReplacementTargets(ClusterState clusterState) { Map nodeReplacementTargets = new HashMap<>(); - for (SingleNodeShutdownMetadata shutdown : clusterState.metadata().nodeShutdowns().values()) { + for (SingleNodeShutdownMetadata shutdown : clusterState.metadata().nodeShutdowns().getAll().values()) { if (shutdown.getType() == SingleNodeShutdownMetadata.Type.REPLACE) { nodeReplacementTargets.put(shutdown.getTargetNodeName(), shutdown); } @@ -250,15 +250,6 @@ public Map replacementTargetShutdowns() { return this.nodeReplacementTargets; } - @SuppressWarnings("unchecked") - public T custom(String key) { - return (T) clusterState.customs().get(key); - } - - public Map getCustoms() { - return clusterState.getCustoms(); - } - public void ignoreDisable(boolean ignoreDisable) { this.ignoreDisable = ignoreDisable; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java index 1f6bdc9584358..e04e8a47349b6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java @@ -445,8 +445,8 @@ private static boolean isUnassignedDueToTimelyRestart(ShardRouting routing, Node if (info == null || info.getReason() != UnassignedInfo.Reason.NODE_RESTARTING) { return false; } - var shutdown = shutdowns.getAllNodeMetadataMap().get(info.getLastAllocatedNodeId()); - if (shutdown == null || shutdown.getType() != SingleNodeShutdownMetadata.Type.RESTART) { + var shutdown = shutdowns.get(info.getLastAllocatedNodeId(), SingleNodeShutdownMetadata.Type.RESTART); + if (shutdown == null) { return false; } var now = System.nanoTime(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index ce42291ff70bd..e905641bee119 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; @@ -57,6 +56,7 @@ import java.util.function.BiFunction; import java.util.stream.StreamSupport; +import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.REPLACE; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; @@ -897,8 +897,7 @@ public MoveDecision decideMove(final ShardRouting shardRouting) { */ MoveDecision moveDecision = decideMove(shardRouting, sourceNode, canRemain, this::decideCanAllocate); if (moveDecision.canRemain() == false && moveDecision.forceMove() == false) { - final SingleNodeShutdownMetadata shutdown = allocation.metadata().nodeShutdowns().get(shardRouting.currentNodeId()); - final boolean shardsOnReplacedNode = shutdown != null && shutdown.getType().equals(SingleNodeShutdownMetadata.Type.REPLACE); + final boolean shardsOnReplacedNode = allocation.metadata().nodeShutdowns().contains(shardRouting.currentNodeId(), REPLACE); if (shardsOnReplacedNode) { return decideMove(shardRouting, sourceNode, canRemain, this::decideCanForceAllocateForVacate); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 113f798bbcd94..b286f74bde308 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -14,7 +14,6 @@ import org.elasticsearch.Version; import 
org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; -import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -23,9 +22,13 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collections; import java.util.Comparator; @@ -36,6 +39,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.REPLACE; + /** * Given the current allocation of shards and the desired balance, performs the next (legal) shard movements towards the goal. */ @@ -43,415 +48,474 @@ public class DesiredBalanceReconciler { private static final Logger logger = LogManager.getLogger(DesiredBalanceReconciler.class); - private final DesiredBalance desiredBalance; - private final RoutingAllocation allocation; // name chosen to align with code in BalancedShardsAllocator but TODO rename - private final RoutingNodes routingNodes; - private final NodeAllocationOrdering allocationOrdering; - private final NodeAllocationOrdering moveOrdering; - - DesiredBalanceReconciler( - DesiredBalance desiredBalance, - RoutingAllocation routingAllocation, - NodeAllocationOrdering allocationOrdering, - NodeAllocationOrdering moveOrdering - ) { - this.desiredBalance = desiredBalance; - this.allocation = routingAllocation; - this.routingNodes = routingAllocation.routingNodes(); - this.allocationOrdering = allocationOrdering; - this.moveOrdering = moveOrdering; + public static final Setting UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING = Setting.timeSetting( + "cluster.routing.allocation.desired_balance.undesired_allocations.log_interval", + TimeValue.timeValueHours(1), + TimeValue.ZERO, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING = Setting.doubleSetting( + "cluster.routing.allocation.desired_balance.undesired_allocations.threshold", + 0.1, + 0, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final FrequencyCappedAction undesiredAllocationLogInterval; + private double undesiredAllocationsLogThreshold; + private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); + private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); + + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool) { + this.undesiredAllocationLogInterval = new FrequencyCappedAction(threadPool); + clusterSettings.initializeAndWatch(UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, this.undesiredAllocationLogInterval::setMinInterval); + clusterSettings.initializeAndWatch( + UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, + value -> this.undesiredAllocationsLogThreshold = value + ); } - void run() { - try (var ignored = 
allocation.withReconcilingFlag()) { + public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { + var nodeIds = allocation.routingNodes().getAllNodeIds(); + allocationOrdering.retainNodes(nodeIds); + moveOrdering.retainNodes(nodeIds); + new Reconciliation(desiredBalance, allocation).run(); + } - logger.debug("Reconciling desired balance for [{}]", desiredBalance.lastConvergedIndex()); + public void clear() { + allocationOrdering.clear(); + moveOrdering.clear(); + } - if (routingNodes.size() == 0) { - // no data nodes, so fail allocation to report red health - failAllocationOfNewPrimaries(allocation); - logger.trace("no nodes available, nothing to reconcile"); - return; - } + private class Reconciliation { - if (desiredBalance.assignments().isEmpty()) { - // no desired state yet but it is on its way and we'll reroute again when it is ready - logger.trace("desired balance is empty, nothing to reconcile"); - return; - } + private final DesiredBalance desiredBalance; + private final RoutingAllocation allocation; + private final RoutingNodes routingNodes; - // compute next moves towards current desired balance: + Reconciliation(DesiredBalance desiredBalance, RoutingAllocation allocation) { + this.desiredBalance = desiredBalance; + this.allocation = allocation; + this.routingNodes = allocation.routingNodes(); + } - // 1. allocate unassigned shards first - logger.trace("Reconciler#allocateUnassigned"); - allocateUnassigned(); - assert allocateUnassignedInvariant(); + void run() { + try (var ignored = allocation.withReconcilingFlag()) { - // 2. move any shards that cannot remain where they are - logger.trace("Reconciler#moveShards"); - moveShards(); - // 3. move any other shards that are desired elsewhere - logger.trace("Reconciler#balance"); - balance(); + logger.debug("Reconciling desired balance for [{}]", desiredBalance.lastConvergedIndex()); - logger.debug("Reconciliation is complete"); - } - } + if (routingNodes.size() == 0) { + // no data nodes, so fail allocation to report red health + failAllocationOfNewPrimaries(allocation); + logger.trace("no nodes available, nothing to reconcile"); + return; + } + + if (desiredBalance.assignments().isEmpty()) { + // no desired state yet but it is on its way and we'll reroute again when it is ready + logger.trace("desired balance is empty, nothing to reconcile"); + return; + } - private boolean allocateUnassignedInvariant() { - // after allocateUnassigned, every shard must be either assigned or ignored + // compute next moves towards current desired balance: - assert routingNodes.unassigned().isEmpty(); + // 1. allocate unassigned shards first + logger.trace("Reconciler#allocateUnassigned"); + allocateUnassigned(); + assert allocateUnassignedInvariant(); - final var shardCounts = allocation.metadata().stream().filter(indexMetadata -> - // skip any pre-7.2 closed indices which have no routing table entries at all - indexMetadata.getCreationVersion().onOrAfter(Version.V_7_2_0) - || indexMetadata.getState() == IndexMetadata.State.OPEN - || MetadataIndexStateService.isIndexVerifiedBeforeClosed(indexMetadata)) - .flatMap( - indexMetadata -> IntStream.range(0, indexMetadata.getNumberOfShards()) - .mapToObj( - shardId -> Tuple.tuple(new ShardId(indexMetadata.getIndex(), shardId), indexMetadata.getNumberOfReplicas() + 1) - ) - ) - .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + // 2. move any shards that cannot remain where they are + logger.trace("Reconciler#moveShards"); + moveShards(); + // 3. 
move any other shards that are desired elsewhere + logger.trace("Reconciler#balance"); + balance(); - for (final var shardRouting : routingNodes.unassigned().ignored()) { - shardCounts.computeIfPresent(shardRouting.shardId(), (ignored, count) -> count == 1 ? null : count - 1); + logger.debug("Reconciliation is complete"); + } } - for (final var routingNode : routingNodes) { - for (final var shardRouting : routingNode) { + private boolean allocateUnassignedInvariant() { + // after allocateUnassigned, every shard must be either assigned or ignored + + assert routingNodes.unassigned().isEmpty(); + + final var shardCounts = allocation.metadata().stream().filter(indexMetadata -> + // skip any pre-7.2 closed indices which have no routing table entries at all + indexMetadata.getCreationVersion().onOrAfter(Version.V_7_2_0) + || indexMetadata.getState() == IndexMetadata.State.OPEN + || MetadataIndexStateService.isIndexVerifiedBeforeClosed(indexMetadata)) + .flatMap( + indexMetadata -> IntStream.range(0, indexMetadata.getNumberOfShards()) + .mapToObj( + shardId -> Tuple.tuple(new ShardId(indexMetadata.getIndex(), shardId), indexMetadata.getNumberOfReplicas() + 1) + ) + ) + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + + for (final var shardRouting : routingNodes.unassigned().ignored()) { shardCounts.computeIfPresent(shardRouting.shardId(), (ignored, count) -> count == 1 ? null : count - 1); } - } - assert shardCounts.isEmpty() : shardCounts; + for (final var routingNode : routingNodes) { + for (final var shardRouting : routingNode) { + shardCounts.computeIfPresent(shardRouting.shardId(), (ignored, count) -> count == 1 ? null : count - 1); + } + } - return true; - } + assert shardCounts.isEmpty() : shardCounts; - private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { - RoutingNodes routingNodes = allocation.routingNodes(); - assert routingNodes.size() == 0 : routingNodes; - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final ShardRouting shardRouting = unassignedIterator.next(); - final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); - if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == UnassignedInfo.AllocationStatus.NO_ATTEMPT) { - unassignedIterator.updateUnassigned( - new UnassignedInfo( - unassignedInfo.getReason(), - unassignedInfo.getMessage(), - unassignedInfo.getFailure(), - unassignedInfo.getNumFailedAllocations(), - unassignedInfo.getUnassignedTimeInNanos(), - unassignedInfo.getUnassignedTimeInMillis(), - unassignedInfo.isDelayed(), - UnassignedInfo.AllocationStatus.DECIDERS_NO, - unassignedInfo.getFailedNodeIds(), - unassignedInfo.getLastAllocatedNodeId() - ), - shardRouting.recoverySource(), - allocation.changes() - ); - } + return true; } - } - private void allocateUnassigned() { - RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - if (logger.isTraceEnabled()) { - logger.trace("Start allocating unassigned shards: {}", routingNodes.toString()); - } - if (unassigned.isEmpty()) { - return; + private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { + RoutingNodes routingNodes = allocation.routingNodes(); + assert routingNodes.size() == 0 : routingNodes; + final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); + while (unassignedIterator.hasNext()) { + final ShardRouting shardRouting = unassignedIterator.next(); + final 
UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == UnassignedInfo.AllocationStatus.NO_ATTEMPT) { + unassignedIterator.updateUnassigned( + new UnassignedInfo( + unassignedInfo.getReason(), + unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + unassignedInfo.getNumFailedAllocations(), + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + unassignedInfo.isDelayed(), + UnassignedInfo.AllocationStatus.DECIDERS_NO, + unassignedInfo.getFailedNodeIds(), + unassignedInfo.getLastAllocatedNodeId() + ), + shardRouting.recoverySource(), + allocation.changes() + ); + } + } } - /* - * TODO: We could be smarter here and group the shards by index and then - * use the sorter to save some iterations. - */ - final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation); - final Comparator comparator = (o1, o2) -> { - if (o1.primary() ^ o2.primary()) { - return o1.primary() ? -1 : 1; + private void allocateUnassigned() { + RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); + if (logger.isTraceEnabled()) { + logger.trace("Start allocating unassigned shards: {}", routingNodes.toString()); } - if (o1.getIndexName().compareTo(o2.getIndexName()) == 0) { - return o1.getId() - o2.getId(); + if (unassigned.isEmpty()) { + return; } - // this comparator is more expensive than all the others up there - // that's why it's added last even though it could be easier to read - // if we'd apply it earlier. this comparator will only differentiate across - // indices all shards of the same index is treated equally. - final int secondary = secondaryComparator.compare(o1, o2); - assert secondary != 0 : "Index names are equal, should be returned early."; - return secondary; - }; - /* - * we use 2 arrays and move replicas to the second array once we allocated an identical - * replica in the current iteration to make sure all indices get allocated in the same manner. - * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with - * 2 replica and 1 shard would look like: - * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] - * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with - * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. 
- */ - ShardRouting[] primary = unassigned.drain(); - ShardRouting[] secondary = new ShardRouting[primary.length]; - int secondaryLength = 0; - int primaryLength = primary.length; - ArrayUtil.timSort(primary, comparator); - - do { - nextShard: for (int i = 0; i < primaryLength; i++) { - final var shard = primary[i]; - final var assignment = desiredBalance.getAssignment(shard.shardId()); - final var isThrottled = new AtomicBoolean(false); - if (assignment != null) { - - for (final var nodeIdIterator : List.of( - getDesiredNodesIds(shard, assignment), - getFallbackNodeIds(shard, isThrottled) - )) { - for (final var desiredNodeId : nodeIdIterator) { - final var routingNode = routingNodes.node(desiredNodeId); - if (routingNode == null) { - // desired node no longer exists - continue; - } - final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); - switch (decision.type()) { - case YES -> { - logger.debug("Assigning shard [{}] to [{}]", shard, desiredNodeId); - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - routingNodes.initializeShard(shard, desiredNodeId, null, shardSize, allocation.changes()); - allocationOrdering.recordAllocation(desiredNodeId); - if (shard.primary() == false) { - // copy over the same replica shards to the secondary array so they will get allocated - // in a subsequent iteration, allowing replicas of other shards to be allocated first - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - secondary[secondaryLength++] = primary[++i]; + + /* + * TODO: We could be smarter here and group the shards by index and then + * use the sorter to save some iterations. + */ + final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation); + final Comparator comparator = (o1, o2) -> { + if (o1.primary() ^ o2.primary()) { + return o1.primary() ? -1 : 1; + } + if (o1.getIndexName().compareTo(o2.getIndexName()) == 0) { + return o1.getId() - o2.getId(); + } + // this comparator is more expensive than all the others up there + // that's why it's added last even though it could be easier to read + // if we'd apply it earlier. this comparator will only differentiate across + // indices all shards of the same index is treated equally. + final int secondary = secondaryComparator.compare(o1, o2); + assert secondary != 0 : "Index names are equal, should be returned early."; + return secondary; + }; + /* + * we use 2 arrays and move replicas to the second array once we allocated an identical + * replica in the current iteration to make sure all indices get allocated in the same manner. + * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with + * 2 replica and 1 shard would look like: + * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] + * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with + * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. 
+ */ + ShardRouting[] primary = unassigned.drain(); + ShardRouting[] secondary = new ShardRouting[primary.length]; + int secondaryLength = 0; + int primaryLength = primary.length; + ArrayUtil.timSort(primary, comparator); + + do { + nextShard: for (int i = 0; i < primaryLength; i++) { + final var shard = primary[i]; + final var assignment = desiredBalance.getAssignment(shard.shardId()); + final var isThrottled = new AtomicBoolean(false); + if (assignment != null) { + + for (final var nodeIdIterator : List.of( + getDesiredNodesIds(shard, assignment), + getFallbackNodeIds(shard, isThrottled) + )) { + for (final var desiredNodeId : nodeIdIterator) { + final var routingNode = routingNodes.node(desiredNodeId); + if (routingNode == null) { + // desired node no longer exists + continue; + } + final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); + switch (decision.type()) { + case YES -> { + logger.debug("Assigning shard [{}] to [{}]", shard, desiredNodeId); + final long shardSize = DiskThresholdDecider.getExpectedShardSize( + shard, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + routingNodes.initializeShard(shard, desiredNodeId, null, shardSize, allocation.changes()); + allocationOrdering.recordAllocation(desiredNodeId); + if (shard.primary() == false) { + // copy over the same replica shards to the secondary array so they will get allocated + // in a subsequent iteration, allowing replicas of other shards to be allocated first + while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { + secondary[secondaryLength++] = primary[++i]; + } } + continue nextShard; } - continue nextShard; - } - case THROTTLE -> isThrottled.set(true); - case NO -> { - if (logger.isTraceEnabled()) { - logger.trace("Couldn't assign shard [{}] to [{}]", shard.shardId(), desiredNodeId); + case THROTTLE -> isThrottled.set(true); + case NO -> { + if (logger.isTraceEnabled()) { + logger.trace("Couldn't assign shard [{}] to [{}]", shard.shardId(), desiredNodeId); + } } } } } } + + logger.debug("No eligible node found to assign shard [{}] amongst [{}]", shard, assignment); + + final UnassignedInfo.AllocationStatus allocationStatus; + if (assignment == null || assignment.isIgnored(shard.primary())) { + allocationStatus = UnassignedInfo.AllocationStatus.NO_ATTEMPT; + } else if (isThrottled.get()) { + allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; + } else { + allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_NO; + } + + unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); + if (shard.primary() == false) { + // we could not allocate it and we are a replica - check if we can ignore the other replicas + while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { + unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); + } + } } + primaryLength = secondaryLength; + ShardRouting[] tmp = primary; + primary = secondary; + secondary = tmp; + secondaryLength = 0; + } while (primaryLength > 0); + } - logger.debug("No eligible node found to assign shard [{}] amongst [{}]", shard, assignment); + private Iterable getDesiredNodesIds(ShardRouting shard, ShardAssignment assignment) { + return allocationOrdering.sort(allocation.deciders().getForcedInitialShardAllocationToNodes(shard, allocation).map(forced -> { + logger.debug("Shard [{}] assignment is 
ignored. Initial allocation forced to {}", shard.shardId(), forced); + return forced; + }).orElse(assignment.nodeIds())); + } - final UnassignedInfo.AllocationStatus allocationStatus; - if (assignment == null || assignment.isIgnored(shard.primary())) { - allocationStatus = UnassignedInfo.AllocationStatus.NO_ATTEMPT; - } else if (isThrottled.get()) { - allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; + private Iterable getFallbackNodeIds(ShardRouting shard, AtomicBoolean isThrottled) { + return () -> { + if (shard.primary() && isThrottled.get() == false) { + var fallbackNodeIds = allocation.routingNodes().getAllNodeIds(); + logger.debug("Shard [{}] assignment is temporary not possible. Falling back to {}", shard.shardId(), fallbackNodeIds); + return allocationOrdering.sort(fallbackNodeIds).iterator(); } else { - allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_NO; + return Collections.emptyIterator(); } + }; + } - unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); - if (shard.primary() == false) { - // we could not allocate it and we are a replica - check if we can ignore the other replicas - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); - } + private void moveShards() { + // Iterate over all started shards and check if they can remain. In the presence of throttling shard movements, + // the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the shards. + for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { + final var shardRouting = iterator.next(); + + if (shardRouting.started() == false) { + // can only move started shards + continue; } - } - primaryLength = secondaryLength; - ShardRouting[] tmp = primary; - primary = secondary; - secondary = tmp; - secondaryLength = 0; - } while (primaryLength > 0); - } - private Iterable getDesiredNodesIds(ShardRouting shard, ShardAssignment assignment) { - return allocationOrdering.sort(allocation.deciders().getForcedInitialShardAllocationToNodes(shard, allocation).map(forced -> { - logger.debug("Shard [{}] assignment is ignored. Initial allocation forced to {}", shard.shardId(), forced); - return forced; - }).orElse(assignment.nodeIds())); - } + final var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + // balance is not computed + continue; + } - private Iterable getFallbackNodeIds(ShardRouting shard, AtomicBoolean isThrottled) { - return () -> { - if (shard.primary() && isThrottled.get() == false) { - var fallbackNodeIds = allocation.routingNodes().getAllNodeIds(); - logger.debug("Shard [{}] assignment is temporary not possible. Falling back to {}", shard.shardId(), fallbackNodeIds); - return allocationOrdering.sort(fallbackNodeIds).iterator(); - } else { - return Collections.emptyIterator(); - } - }; - } + if (assignment.nodeIds().contains(shardRouting.currentNodeId())) { + // shard is already on a desired node + continue; + } - private void moveShards() { - // Iterate over all started shards and check if they can remain. In the presence of throttling shard movements, - // the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the shards. 
- for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { - final var shardRouting = iterator.next(); + if (allocation.deciders().canAllocate(shardRouting, allocation).type() != Decision.Type.YES) { + // cannot allocate anywhere, no point in looking for a target node + continue; + } - if (shardRouting.started() == false) { - // can only move started shards - continue; - } + final var routingNode = routingNodes.node(shardRouting.currentNodeId()); + final var canRemainDecision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); + if (canRemainDecision.type() != Decision.Type.NO) { + // it's desired elsewhere but technically it can remain on its current node. Defer its movement until later on to give + // priority to shards that _must_ move. + continue; + } - final var assignment = desiredBalance.getAssignment(shardRouting.shardId()); - if (assignment == null) { - // balance is not computed - continue; + final var moveTarget = findRelocationTarget(shardRouting, assignment.nodeIds()); + if (moveTarget != null) { + logger.debug("Moving shard {} from {} to {}", shardRouting.shardId(), shardRouting.currentNodeId(), moveTarget.getId()); + routingNodes.relocateOrReinitializeShard( + shardRouting, + moveTarget.getId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), + allocation.changes() + ); + iterator.dePrioritizeNode(shardRouting.currentNodeId()); + moveOrdering.recordAllocation(shardRouting.currentNodeId()); + } } + } - if (assignment.nodeIds().contains(shardRouting.currentNodeId())) { - // shard is already on a desired node - continue; + private void balance() { + if (allocation.deciders().canRebalance(allocation).type() != Decision.Type.YES) { + return; } - if (allocation.deciders().canAllocate(shardRouting, allocation).type() != Decision.Type.YES) { - // cannot allocate anywhere, no point in looking for a target node - continue; - } + long allAllocations = 0; + long undesiredAllocations = 0; - final var routingNode = routingNodes.node(shardRouting.currentNodeId()); - final var canRemainDecision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); - if (canRemainDecision.type() != Decision.Type.NO) { - // it's desired elsewhere but technically it can remain on its current node. Defer its movement until later on to give - // priority to shards that _must_ move. - continue; - } + // Iterate over all started shards and try to move any which are on undesired nodes. In the presence of throttling shard + // movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the + // shards. 
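+        // Every shard visited below counts towards allAllocations, and every started shard whose current node is not in its computed desired assignment also counts towards undesiredAllocations; e.g. with a 10% warn threshold, finding 30 undesired shards out of 100 visited would make maybeLogUndesiredAllocationsWarning emit its frequency-capped warning.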
+ for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { + final var shardRouting = iterator.next(); - final var moveTarget = findRelocationTarget(shardRouting, assignment.nodeIds()); - if (moveTarget != null) { - logger.debug("Moving shard {} from {} to {}", shardRouting.shardId(), shardRouting.currentNodeId(), moveTarget.getId()); - routingNodes.relocateOrReinitializeShard( - shardRouting, - moveTarget.getId(), - allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), - allocation.changes() - ); - iterator.dePrioritizeNode(shardRouting.currentNodeId()); - moveOrdering.recordAllocation(shardRouting.currentNodeId()); - } - } - } + allAllocations++; - private void balance() { - if (allocation.deciders().canRebalance(allocation).type() != Decision.Type.YES) { - return; - } + if (shardRouting.started() == false) { + // can only rebalance started shards + continue; + } - // Iterate over all started shards and try to move any which are on undesired nodes. In the presence of throttling shard movements, - // the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are offloading the shards. - for (final var iterator = OrderedShardsIterator.create(routingNodes, moveOrdering); iterator.hasNext();) { - final var shardRouting = iterator.next(); + final var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + // balance is not computed + continue; + } - if (shardRouting.started() == false) { - // can only rebalance started shards - continue; - } + if (assignment.nodeIds().contains(shardRouting.currentNodeId())) { + // shard is already on a desired node + continue; + } - final var assignment = desiredBalance.getAssignment(shardRouting.shardId()); - if (assignment == null) { - // balance is not computed - continue; - } + undesiredAllocations++; - if (assignment.nodeIds().contains(shardRouting.currentNodeId())) { - // shard is already on a desired node - continue; - } + if (allocation.deciders().canRebalance(shardRouting, allocation).type() != Decision.Type.YES) { + // rebalancing disabled for this shard + continue; + } - if (allocation.deciders().canRebalance(shardRouting, allocation).type() != Decision.Type.YES) { - // rebalancing disabled for this shard - continue; - } + if (allocation.deciders().canAllocate(shardRouting, allocation).type() != Decision.Type.YES) { + // cannot allocate anywhere, no point in looking for a target node + continue; + } - if (allocation.deciders().canAllocate(shardRouting, allocation).type() != Decision.Type.YES) { - // cannot allocate anywhere, no point in looking for a target node - continue; + final var rebalanceTarget = findRelocationTarget(shardRouting, assignment.nodeIds(), this::decideCanAllocate); + if (rebalanceTarget != null) { + logger.debug( + "Rebalancing shard {} from {} to {}", + shardRouting.shardId(), + shardRouting.currentNodeId(), + rebalanceTarget.getId() + ); + + routingNodes.relocateOrReinitializeShard( + shardRouting, + rebalanceTarget.getId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), + allocation.changes() + ); + iterator.dePrioritizeNode(shardRouting.currentNodeId()); + moveOrdering.recordAllocation(shardRouting.currentNodeId()); + } } - final var rebalanceTarget = findRelocationTarget(shardRouting, assignment.nodeIds(), this::decideCanAllocate); - if (rebalanceTarget != null) { - logger.debug( - "Rebalancing shard 
{} from {} to {}", - shardRouting.shardId(), - shardRouting.currentNodeId(), - rebalanceTarget.getId() - ); + maybeLogUndesiredAllocationsWarning(allAllocations, undesiredAllocations); + } - routingNodes.relocateOrReinitializeShard( - shardRouting, - rebalanceTarget.getId(), - allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), - allocation.changes() + private void maybeLogUndesiredAllocationsWarning(long allAllocations, long undesiredAllocations) { + if (allAllocations > 0 && undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations) { + undesiredAllocationLogInterval.maybeExecute( + () -> logger.warn( + "[{}%] of assigned shards ({}/{}) are not on their desired nodes, which exceeds the warn threshold of [{}%]", + 100.0 * undesiredAllocations / allAllocations, + undesiredAllocations, + allAllocations, + 100.0 * undesiredAllocationsLogThreshold + ) ); - iterator.dePrioritizeNode(shardRouting.currentNodeId()); - moveOrdering.recordAllocation(shardRouting.currentNodeId()); } } - } - private DiscoveryNode findRelocationTarget(final ShardRouting shardRouting, Set<String> desiredNodeIds) { - final var moveDecision = findRelocationTarget(shardRouting, desiredNodeIds, this::decideCanAllocate); - if (moveDecision != null) { - return moveDecision; - } + private DiscoveryNode findRelocationTarget(final ShardRouting shardRouting, Set<String> desiredNodeIds) { + final var moveDecision = findRelocationTarget(shardRouting, desiredNodeIds, this::decideCanAllocate); + if (moveDecision != null) { + return moveDecision; + } - final var shutdown = allocation.metadata().nodeShutdowns().get(shardRouting.currentNodeId()); - final var shardsOnReplacedNode = shutdown != null && shutdown.getType().equals(SingleNodeShutdownMetadata.Type.REPLACE); - if (shardsOnReplacedNode) { - return findRelocationTarget(shardRouting, desiredNodeIds, this::decideCanForceAllocateForVacate); + final var shardsOnReplacedNode = allocation.metadata().nodeShutdowns().contains(shardRouting.currentNodeId(), REPLACE); + if (shardsOnReplacedNode) { + return findRelocationTarget(shardRouting, desiredNodeIds, this::decideCanForceAllocateForVacate); + } + return null; } - return null; - } - private DiscoveryNode findRelocationTarget( - ShardRouting shardRouting, - Set<String> desiredNodeIds, - BiFunction<ShardRouting, RoutingNode, Decision> canAllocateDecider - ) { - for (final var nodeId : desiredNodeIds) { - // TODO consider ignored nodes here too? - if (nodeId.equals(shardRouting.currentNodeId())) { - continue; - } - final var node = routingNodes.node(nodeId); - if (node == null) { // node left the cluster while reconciliation is still in progress - continue; - } - final var decision = canAllocateDecider.apply(shardRouting, node); - logger.trace("relocate {} to {}: {}", shardRouting, nodeId, decision); - if (decision.type() == Decision.Type.YES) { - return node.node(); + private DiscoveryNode findRelocationTarget( + ShardRouting shardRouting, + Set<String> desiredNodeIds, + BiFunction<ShardRouting, RoutingNode, Decision> canAllocateDecider + ) { + for (final var nodeId : desiredNodeIds) { + // TODO consider ignored nodes here too?
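+            // Each candidate is vetted below: the shard's current node is skipped, nodes that have already left the cluster mid-reconciliation are skipped, and the first remaining node for which the supplied decider returns YES becomes the relocation target.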
+ if (nodeId.equals(shardRouting.currentNodeId())) { + continue; + } + final var node = routingNodes.node(nodeId); + if (node == null) { // node left the cluster while reconciliation is still in progress + continue; + } + final var decision = canAllocateDecider.apply(shardRouting, node); + logger.trace("relocate {} to {}: {}", shardRouting, nodeId, decision); + if (decision.type() == Decision.Type.YES) { + return node.node(); + } } - } - return null; - } + return null; + } - private Decision decideCanAllocate(ShardRouting shardRouting, RoutingNode target) { - assert target != null : "Target node is not found"; - return allocation.deciders().canAllocate(shardRouting, target, allocation); - } + private Decision decideCanAllocate(ShardRouting shardRouting, RoutingNode target) { + assert target != null : "Target node is not found"; + return allocation.deciders().canAllocate(shardRouting, target, allocation); + } - private Decision decideCanForceAllocateForVacate(ShardRouting shardRouting, RoutingNode target) { - assert target != null : "Target node is not found"; - return allocation.deciders().canForceAllocateDuringReplace(shardRouting, target, allocation); + private Decision decideCanForceAllocateForVacate(ShardRouting shardRouting, RoutingNode target) { + assert target != null : "Target node is not found"; + return allocation.deciders().canForceAllocateDuringReplace(shardRouting, target, allocation); + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 554b568ceb55f..5d4526c263e33 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -50,13 +50,12 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator { private final ThreadPool threadPool; private final DesiredBalanceReconcilerAction reconciler; private final DesiredBalanceComputer desiredBalanceComputer; + private final DesiredBalanceReconciler desiredBalanceReconciler; private final ContinuousComputation<DesiredBalanceInput> desiredBalanceComputation; private final PendingListenersQueue queue; private final AtomicLong indexGenerator = new AtomicLong(-1); private final ConcurrentLinkedQueue<List<MoveAllocationCommand>> pendingDesiredBalanceMoves = new ConcurrentLinkedQueue<>(); private final MasterServiceTaskQueue<ReconcileDesiredBalanceTask> masterServiceTaskQueue; - private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); - private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); private volatile DesiredBalance currentDesiredBalance = DesiredBalance.INITIAL; private volatile boolean resetCurrentDesiredBalance = false; @@ -100,6 +99,7 @@ public DesiredBalanceShardsAllocator( this.threadPool = threadPool; this.reconciler = reconciler; this.desiredBalanceComputer = desiredBalanceComputer; + this.desiredBalanceReconciler = new DesiredBalanceReconciler(clusterService.getClusterSettings(), threadPool); this.desiredBalanceComputation = new ContinuousComputation<>(threadPool) { @Override @@ -228,13 +228,7 @@ protected void reconcile(DesiredBalance desiredBalance, RoutingAllocation alloca } else { logger.debug("Reconciling desired balance for [{}]", desiredBalance.lastConvergedIndex()); } - var allNodeIds = allocation.routingNodes().getAllNodeIds(); -
allocationOrdering.retainNodes(allNodeIds); - moveOrdering.retainNodes(allNodeIds); - recordTime( - cumulativeReconciliationTime, - new DesiredBalanceReconciler(desiredBalance, allocation, allocationOrdering, moveOrdering)::run - ); + recordTime(cumulativeReconciliationTime, () -> desiredBalanceReconciler.reconcile(desiredBalance, allocation)); if (logger.isTraceEnabled()) { logger.trace("Reconciled desired balance: {}", desiredBalance); } else { @@ -287,7 +281,7 @@ private void onNoLongerMaster() { currentDesiredBalance = DesiredBalance.INITIAL; queue.completeAllAsNotMaster(); pendingDesiredBalanceMoves.clear(); - allocationOrdering.clear(); + desiredBalanceReconciler.clear(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java new file mode 100644 index 0000000000000..3c827923ebefa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.function.LongSupplier; + +/** + * Execute an action at most once per time interval + */ +public class FrequencyCappedAction { + + private final LongSupplier currentTimeMillisSupplier; + private TimeValue minInterval; + + private long next = -1; + + public FrequencyCappedAction(ThreadPool threadPool) { + this(threadPool::relativeTimeInMillis); + } + + public FrequencyCappedAction(LongSupplier currentTimeMillisSupplier) { + this.currentTimeMillisSupplier = currentTimeMillisSupplier; + this.minInterval = TimeValue.MAX_VALUE; + } + + public void setMinInterval(TimeValue minInterval) { + this.minInterval = minInterval; + } + + public void maybeExecute(Runnable runnable) { + var current = currentTimeMillisSupplier.getAsLong(); + if (current >= next) { + next = current + minInterval.millis(); + runnable.run(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDecider.java index 56e9b633ba091..ce3357f74b980 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDecider.java @@ -15,8 +15,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import java.util.Optional; - public class NodeReplacementAllocationDecider extends AllocationDecider { public static final String NAME = "node_replacement"; @@ -36,8 +34,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing if (replacementOngoing(allocation) == false) { return YES__NO_REPLACEMENTS; } else if (replacementFromSourceToTarget(allocation, shardRouting.currentNodeId(), 
node.node().getName())) { - return Decision.single( - Decision.Type.YES, + return allocation.decision( + Decision.YES, NAME, "node [%s] is replacing node [%s], and may receive shards from it", shardRouting.currentNodeId(), @@ -50,16 +48,16 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return YES__RECONCILING; } - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "node [%s] is being replaced, and its shards may only be allocated to the replacement target [%s]", shardRouting.currentNodeId(), getReplacementName(allocation, shardRouting.currentNodeId()) ); } else if (isReplacementSource(allocation, node.nodeId())) { - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "node [%s] is being replaced by [%s], so no data may be allocated to it", node.nodeId(), @@ -75,8 +73,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } final SingleNodeShutdownMetadata shutdown = allocation.replacementTargetShutdowns().get(node.node().getName()); - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "node [%s] is replacing the vacating node [%s], only data currently allocated to the source node " + "may be allocated to it until the replacement is complete", @@ -94,8 +92,8 @@ public Decision canRemain(IndexMetadata indexMetadata, ShardRouting shardRouting if (replacementOngoing(allocation) == false) { return YES__NO_REPLACEMENTS; } else if (isReplacementSource(allocation, node.nodeId())) { - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "node [%s] is being replaced by node [%s], so no data may remain on it", node.nodeId(), @@ -112,8 +110,8 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod return YES__NO_REPLACEMENTS; } else if (isReplacementTargetName(allocation, node.getName())) { final SingleNodeShutdownMetadata shutdown = allocation.replacementTargetShutdowns().get(node.getName()); - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "node [%s] is a node replacement target for node [%s], " + "shards cannot auto expand to be on it until the replacement is complete", @@ -121,8 +119,8 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod shutdown == null ? 
null : shutdown.getNodeId() ); } else if (isReplacementSource(allocation, node.getId())) { - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "node [%s] is being replaced by [%s], shards cannot auto expand to be on it", node.getId(), @@ -136,16 +134,16 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod @Override public Decision canForceAllocateDuringReplace(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { if (replacementFromSourceToTarget(allocation, shardRouting.currentNodeId(), node.node().getName())) { - return Decision.single( - Decision.Type.YES, + return allocation.decision( + Decision.YES, NAME, "node [%s] is being replaced by node [%s], and can be force vacated to the target", shardRouting.currentNodeId(), node.nodeId() ); } else { - return Decision.single( - Decision.Type.NO, + return allocation.decision( + Decision.NO, NAME, "shard is not on the source of a node replacement relocated to the replacement target" ); @@ -155,8 +153,8 @@ public Decision canForceAllocateDuringReplace(ShardRouting shardRouting, Routing @Override public Decision canAllocateReplicaWhenThereIsRetentionLease(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { if (isReplacementTargetName(allocation, node.node().getName())) { - return Decision.single( - Decision.Type.YES, + return allocation.decision( + Decision.YES, NAME, "node [%s] is a node replacement target and can have a previously allocated replica re-allocated to it", node.nodeId() @@ -183,11 +181,8 @@ private static boolean replacementFromSourceToTarget(RoutingAllocation allocatio if (sourceNodeId == null || targetNodeName == null) { return false; } - final SingleNodeShutdownMetadata shutdown = allocation.metadata().nodeShutdowns().get(sourceNodeId); - return shutdown != null - && shutdown.getType().equals(SingleNodeShutdownMetadata.Type.REPLACE) - && shutdown.getNodeId().equals(sourceNodeId) - && shutdown.getTargetNodeName().equals(targetNodeName); + var shutdown = allocation.metadata().nodeShutdowns().get(sourceNodeId, SingleNodeShutdownMetadata.Type.REPLACE); + return shutdown != null && shutdown.getTargetNodeName().equals(targetNodeName); } /** @@ -197,8 +192,7 @@ private static boolean isReplacementSource(RoutingAllocation allocation, String if (nodeId == null || replacementOngoing(allocation) == false) { return false; } - final SingleNodeShutdownMetadata shutdown = allocation.metadata().nodeShutdowns().get(nodeId); - return shutdown != null && shutdown.getType().equals(SingleNodeShutdownMetadata.Type.REPLACE); + return allocation.metadata().nodeShutdowns().contains(nodeId, SingleNodeShutdownMetadata.Type.REPLACE); } /** @@ -215,9 +209,7 @@ private static String getReplacementName(RoutingAllocation allocation, String no if (nodeIdBeingReplaced == null || replacementOngoing(allocation) == false) { return null; } - return Optional.ofNullable(allocation.metadata().nodeShutdowns().get(nodeIdBeingReplaced)) - .filter(shutdown -> shutdown.getType().equals(SingleNodeShutdownMetadata.Type.REPLACE)) - .map(SingleNodeShutdownMetadata::getTargetNodeName) - .orElse(null); + var metadata = allocation.metadata().nodeShutdowns().get(nodeIdBeingReplaced, SingleNodeShutdownMetadata.Type.REPLACE); + return metadata != null ? 
metadata.getTargetNodeName() : null; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java index f31671ca32bd2..1d22d8262783b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java @@ -57,7 +57,7 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod } private static Decision getDecision(RoutingAllocation allocation, String nodeId) { - final var nodeShutdowns = allocation.metadata().nodeShutdowns(); + final var nodeShutdowns = allocation.metadata().nodeShutdowns().getAll(); if (nodeShutdowns.isEmpty()) { return YES_EMPTY_SHUTDOWN_METADATA; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java index 97d4beb7e3daf..fb5ff3c8411f0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDecider.java @@ -39,7 +39,7 @@ public Decision canAllocate(final ShardRouting shardRouting, final RoutingAlloca return allocation.decision(Decision.YES, NAME, "not an API-level restore"); } - final RestoreInProgress restoresInProgress = allocation.custom(RestoreInProgress.TYPE); + final RestoreInProgress restoresInProgress = allocation.getClusterState().custom(RestoreInProgress.TYPE); if (restoresInProgress != null) { RestoreInProgress.Entry restoreInProgress = restoresInProgress.get(source.restoreUUID()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index b5d3276be4b9e..ab9989c8dad2c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -59,7 +59,7 @@ private static Decision canMove(ShardRouting shardRouting, RoutingAllocation all return YES_NOT_SNAPSHOTTED; } - SnapshotsInProgress snapshotsInProgress = allocation.custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress snapshotsInProgress = allocation.getClusterState().custom(SnapshotsInProgress.TYPE); if (snapshotsInProgress == null || snapshotsInProgress.isEmpty()) { // Snapshots are not running return YES_NOT_RUNNING; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java index d16e927efc973..594e414c3a7ed 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java @@ -12,12 +12,12 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.index.Fields; -import 
org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.similarities.ClassicSimilarity; @@ -116,12 +116,12 @@ public boolean equals(Object obj) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } - XMoreLikeThis mlt = new XMoreLikeThis(reader, similarity == null ? new ClassicSimilarity() : similarity); + XMoreLikeThis mlt = new XMoreLikeThis(searcher.getIndexReader(), similarity == null ? new ClassicSimilarity() : similarity); mlt.setFieldNames(moreLikeFields); mlt.setAnalyzer(analyzer); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java index 376ed6b2626b5..545fbf6cb7b59 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -14,6 +14,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.Query; @@ -129,8 +130,8 @@ public int[] getPositions() { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } @@ -147,7 +148,7 @@ public Query rewrite(IndexReader reader) throws IOException { int position = positions.get(sizeMinus1); Set terms = new HashSet<>(); for (Term term : suffixTerms) { - getPrefixTerms(terms, term, reader); + getPrefixTerms(terms, term, searcher.getIndexReader()); if (terms.size() > maxExpansions) { break; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index 9b913aebb9594..aed11297d4285 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -77,7 +77,7 @@ protected int doHashCode() { @Override protected ScoreFunction rewrite(IndexReader reader) throws IOException { - Query newFilter = filter.rewrite(reader); + Query newFilter = filter.rewrite(new IndexSearcher(reader)); if (newFilter == filter) { return this; } @@ -201,16 +201,16 @@ public void visit(QueryVisitor visitor) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } - Query 
newQ = subQuery.rewrite(reader); + Query newQ = subQuery.rewrite(searcher); ScoreFunction[] newFunctions = new ScoreFunction[functions.length]; boolean needsRewrite = (newQ != subQuery); for (int i = 0; i < functions.length; i++) { - newFunctions[i] = functions[i].rewrite(reader); + newFunctions[i] = functions[i].rewrite(searcher.getIndexReader()); needsRewrite |= (newFunctions[i] != functions[i]); } if (needsRewrite) { diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java index ee35a9a25af1a..77154eda0a649 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java @@ -8,7 +8,6 @@ package org.elasticsearch.common.lucene.search.function; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BulkScorer; @@ -69,12 +68,12 @@ public ScriptScoreQuery( } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query newQ = subQuery.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query newQ = subQuery.rewrite(searcher); if (newQ != subQuery) { return new ScriptScoreQuery(newQ, script, scriptBuilder, lookup, minScore, indexName, shardId, indexVersion); } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index eb6a81d87a688..1c077e2c3c203 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -43,6 +43,7 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceComputer; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceReconciler; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider; @@ -213,6 +214,8 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, DesiredBalanceComputer.PROGRESS_LOG_INTERVAL_SETTING, + DesiredBalanceReconciler.UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING, + DesiredBalanceReconciler.UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, BreakerSettings.CIRCUIT_BREAKER_TYPE, diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index a9f6a5ac3948b..58b67b00b6f30 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -26,22 +26,6 @@ public final class 
KeyedLock { private final ConcurrentMap map = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - private final boolean fair; - - /** - * Creates a new lock - * @param fair Use fair locking, ie threads get the lock in the order they requested it - */ - public KeyedLock(boolean fair) { - this.fair = fair; - } - - /** - * Creates a non-fair lock - */ - public KeyedLock() { - this(false); - } /** * Acquires a lock for the given key. The key is compared by it's equals method not by object identity. The lock can be acquired @@ -56,7 +40,6 @@ public Releasable acquire(T key) { return newLock; } } else { - assert perNodeLock != null; int i = perNodeLock.count.get(); if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) { perNodeLock.lock(); @@ -90,7 +73,7 @@ public Releasable tryAcquire(T key) { } private ReleasableLock tryCreateNewLock(T key) { - KeyLock newLock = new KeyLock(fair); + KeyLock newLock = new KeyLock(); newLock.lock(); KeyLock keyLock = map.putIfAbsent(key, newLock); if (keyLock == null) { @@ -120,10 +103,9 @@ private void release(T key, KeyLock lock) { assert decrementAndGet >= 0 : decrementAndGet + " must be >= 0 but wasn't"; } - private final class ReleasableLock implements Releasable { + private final class ReleasableLock extends AtomicBoolean implements Releasable { final T key; final KeyLock lock; - final AtomicBoolean closed = new AtomicBoolean(); private ReleasableLock(T key, KeyLock lock) { this.key = key; @@ -132,7 +114,7 @@ private ReleasableLock(T key, KeyLock lock) { @Override public void close() { - if (closed.compareAndSet(false, true)) { + if (compareAndSet(false, true)) { release(key, lock); } } @@ -140,8 +122,8 @@ public void close() { @SuppressWarnings("serial") private static final class KeyLock extends ReentrantLock { - KeyLock(boolean fair) { - super(fair); + KeyLock() { + super(); } private final AtomicInteger count = new AtomicInteger(1); diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index 197bd4aaa8f83..c52f69d232c87 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -187,8 +186,8 @@ void abortTaskIfApplicable(String reason) { } private boolean isNodeShuttingDown(ClusterChangedEvent event, String nodeId) { - return NodesShutdownMetadata.isNodeShuttingDown(event.previousState(), nodeId) == false - && NodesShutdownMetadata.isNodeShuttingDown(event.state(), nodeId); + return event.previousState().metadata().nodeShutdowns().contains(nodeId) == false + && event.state().metadata().nodeShutdowns().contains(nodeId); } public static List getNamedXContentParsers() { diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index c77d4f3cc227b..bb76514dddd61 100644 --- 
a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -91,6 +91,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final HttpTracer httpLogger; private final Tracer tracer; + private volatile boolean gracefullyCloseConnections; private volatile long slowLogThresholdMs; @@ -454,7 +455,8 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan threadContext, corsHandler, maybeHttpLogger, - tracer + tracer, + gracefullyCloseConnections ); } catch (final IllegalArgumentException e) { badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); @@ -468,7 +470,8 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan threadContext, corsHandler, httpLogger, - tracer + tracer, + gracefullyCloseConnections ); } channel = innerChannel; @@ -510,4 +513,8 @@ private static ActionListener earlyResponseListener(HttpRequest request, H public ThreadPool getThreadPool() { return threadPool; } + + public void gracefullyCloseConnections() { + gracefullyCloseConnections = true; + } } diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 6fc6e7eb3ffbc..2b4e1fdc1d58c 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -56,6 +56,7 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann private final HttpChannel httpChannel; private final CorsHandler corsHandler; private final Tracer tracer; + private final boolean closeConnection; @Nullable private final HttpTracer httpLogger; @@ -69,7 +70,8 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann ThreadContext threadContext, CorsHandler corsHandler, @Nullable HttpTracer httpLogger, - Tracer tracer + Tracer tracer, + boolean closeConnection ) { super(request, settings.detailedErrorsEnabled()); this.httpChannel = httpChannel; @@ -80,6 +82,7 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann this.corsHandler = corsHandler; this.httpLogger = httpLogger; this.tracer = tracer; + this.closeConnection = closeConnection; } @Override @@ -95,7 +98,7 @@ public void sendResponse(RestResponse restResponse) { final SpanId spanId = SpanId.forRestRequest(request); final ArrayList toClose = new ArrayList<>(4); - if (HttpUtils.shouldCloseConnection(httpRequest)) { + if (HttpUtils.shouldCloseConnection(httpRequest) || closeConnection) { toClose.add(() -> CloseableChannel.closeChannel(httpChannel)); } toClose.add(() -> tracer.stopTrace(request)); @@ -159,6 +162,9 @@ public void sendResponse(RestResponse restResponse) { // Add all custom headers addCustomHeaders(httpResponse, restResponse.getHeaders()); addCustomHeaders(httpResponse, restResponse.filterHeaders(threadContext.getResponseHeaders())); + if (closeConnection) { + setHeaderField(httpResponse, CONNECTION, CLOSE); + } // If our response doesn't specify a content-type header, set one setHeaderField(httpResponse, CONTENT_TYPE, restResponse.contentType(), false); diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 4be3cf1f47d54..41ac48f8a8700 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ 
b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -55,6 +55,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.SearchIndexNameMatcher; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; +import org.elasticsearch.index.shard.GlobalCheckpointSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; @@ -412,7 +413,7 @@ private long getAvgShardSizeInBytes() throws IOException { public synchronized IndexShard createShard( final ShardRouting routing, - final Consumer<ShardId> globalCheckpointSyncer, + final GlobalCheckpointSyncer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); @@ -520,7 +521,7 @@ public synchronized IndexShard createShard( engineWarmer, searchOperationListeners, indexingOperationListeners, - () -> globalCheckpointSyncer.accept(shardId), + globalCheckpointSyncer, retentionLeaseSyncer, circuitBreakerService, snapshotCommitSupplier, diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 8901b1ded7d38..5ea2d08b8a709 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -758,6 +758,11 @@ public enum SearcherScope { */ public abstract void asyncEnsureTranslogSynced(Translog.Location location, Consumer<Exception> listener); + /** + * Ensures that the global checkpoint has been persisted to the underlying storage. + */ + public abstract void asyncEnsureGlobalCheckpointSynced(long globalCheckpoint, Consumer<Exception> listener); + public abstract void syncTranslog() throws IOException; /** diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 510de269b577d..24c2242708ead 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -179,7 +179,7 @@ public class InternalEngine extends Engine { private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; private final FlushListeners flushListener; - private final AsyncIOProcessor<Translog.Location> translogSyncProcessor; + private final AsyncIOProcessor<Tuple<Long, Translog.Location>> translogSyncProcessor; private final CompletionStatsCache completionStatsCache; @@ -214,6 +214,9 @@ public class InternalEngine extends Engine { private final ByteSizeValue totalDiskSpace; + protected static final String REAL_TIME_GET_REFRESH_SOURCE = "realtime_get"; + protected static final String UNSAFE_VERSION_MAP_REFRESH_SOURCE = "unsafe_version_map"; + public InternalEngine(EngineConfig engineConfig) { this(engineConfig, IndexWriter.MAX_DOCS, LocalCheckpointTracker::new); } @@ -614,12 +617,23 @@ public boolean isTranslogSyncNeeded() { return getTranslog().syncNeeded(); } - private AsyncIOProcessor<Translog.Location> createTranslogSyncProcessor(Logger logger, ThreadContext threadContext) { + private AsyncIOProcessor<Tuple<Long, Translog.Location>> createTranslogSyncProcessor(Logger logger, ThreadContext threadContext) { return new AsyncIOProcessor<>(logger, 1024, threadContext) { @Override - protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException { + protected void write(List<Tuple<Tuple<Long, Translog.Location>, Consumer<Exception>>> candidates) throws IOException { try { -
final boolean synced = translog.ensureSynced(candidates.stream().map(Tuple::v1)); + Translog.Location location = Translog.Location.EMPTY; + long processGlobalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + for (Tuple<Tuple<Long, Translog.Location>, Consumer<Exception>> syncMarkers : candidates) { + Tuple<Long, Translog.Location> marker = syncMarkers.v1(); + long globalCheckpointToSync = marker.v1(); + if (globalCheckpointToSync != SequenceNumbers.UNASSIGNED_SEQ_NO) { + processGlobalCheckpoint = SequenceNumbers.max(processGlobalCheckpoint, globalCheckpointToSync); + } + location = location.compareTo(marker.v2()) >= 0 ? location : marker.v2(); + } + + final boolean synced = translog.ensureSynced(location, processGlobalCheckpoint); if (synced) { revisitIndexDeletionPolicyOnTranslogSynced(); } @@ -636,7 +650,12 @@ protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candida @Override public void asyncEnsureTranslogSynced(Translog.Location location, Consumer<Exception> listener) { - translogSyncProcessor.put(location, listener); + translogSyncProcessor.put(new Tuple<>(SequenceNumbers.NO_OPS_PERFORMED, location), listener); + } + + @Override + public void asyncEnsureGlobalCheckpointSynced(long globalCheckpoint, Consumer<Exception> listener) { + translogSyncProcessor.put(new Tuple<>(globalCheckpoint, Translog.Location.EMPTY), listener); } @Override @@ -848,7 +867,7 @@ protected GetResult realtimeGetUnderLock( } } assert versionValue.seqNo >= 0 : versionValue; - refreshIfNeeded("realtime_get", versionValue.seqNo); + refreshIfNeeded(REAL_TIME_GET_REFRESH_SOURCE, versionValue.seqNo); } if (getFromSearcherIfNotInTranslog) { return getFromSearcher(get, acquireSearcher("realtime_get", SearcherScope.INTERNAL, searcherWrapper), false); @@ -960,7 +979,7 @@ private VersionValue getVersionFromMap(BytesRef id) { // map so once we pass this point we can safely lookup from the version map. if (versionMap.isUnsafe()) { lastUnsafeSegmentGenerationForGets.set(lastCommittedSegmentInfos.getGeneration() + 1); - refresh("unsafe_version_map", SearcherScope.INTERNAL, true); + refreshInternalSearcher(UNSAFE_VERSION_MAP_REFRESH_SOURCE, true); } versionMap.enforceSafeAccess(); } @@ -1929,6 +1948,10 @@ public RefreshResult maybeRefresh(String source) throws EngineException { return refresh(source, SearcherScope.EXTERNAL, false); } + protected RefreshResult refreshInternalSearcher(String source, boolean block) throws EngineException { + return refresh(source, SearcherScope.INTERNAL, block); + } + final RefreshResult refresh(String source, SearcherScope scope, boolean block) throws EngineException { // both refresh types will result in an internal refresh but only the external will also // pass the new reader reference to the external reader manager.
@@ -3052,7 +3075,7 @@ protected final void refreshIfNeeded(String source, long requestingSeqNo) { if (lastRefreshedCheckpoint() < requestingSeqNo) { synchronized (refreshIfNeededMutex) { if (lastRefreshedCheckpoint() < requestingSeqNo) { - refresh(source, SearcherScope.INTERNAL, true); + refreshInternalSearcher(source, true); } } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index b75d0906debea..71c9e2ed294cc 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -338,6 +338,11 @@ public void asyncEnsureTranslogSynced(Translog.Location location, Consumer<Exce listener.accept(null); } + @Override + public void asyncEnsureGlobalCheckpointSynced(long globalCheckpoint, Consumer<Exception> listener) { + listener.accept(null); + } + @Override public void syncTranslog() {} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 1a8c2a9252c26..139a4ec3e7d64 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -121,8 +121,7 @@ public enum MergeReason { private final DocumentParser documentParser; private final Version indexVersionCreated; private final MapperRegistry mapperRegistry; - private final MappingParserContext mappingParserContext; - + private final Supplier<MappingParserContext> mappingParserContextSupplier; private volatile DocumentMapper mapper; public MapperService( @@ -164,7 +163,7 @@ public MapperService( this.indexVersionCreated = indexSettings.getIndexVersionCreated(); this.indexAnalyzers = indexAnalyzers; this.mapperRegistry = mapperRegistry; - this.mappingParserContext = new MappingParserContext( + this.mappingParserContextSupplier = () -> new MappingParserContext( similarityService::getSimilarity, type -> mapperRegistry.getMapperParser(type, indexVersionCreated), mapperRegistry.getRuntimeFieldParsers()::get, @@ -176,12 +175,12 @@ public MapperService( indexSettings, idFieldMapper ); - this.documentParser = new DocumentParser(parserConfiguration, this.mappingParserContext); + this.documentParser = new DocumentParser(parserConfiguration, this.mappingParserContextSupplier.get()); Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers = mapperRegistry.getMetadataMapperParsers( indexSettings.getIndexVersionCreated() ); this.mappingParser = new MappingParser( - mappingParserContext, + mappingParserContextSupplier, metadataMapperParsers, this::getMetadataMappers, this::resolveDocumentType @@ -197,7 +196,7 @@ public IndexAnalyzers getIndexAnalyzers() { } public MappingParserContext parserContext() { - return this.mappingParserContext; + return mappingParserContextSupplier.get(); } /** @@ -209,6 +208,7 @@ public DocumentParser documentParser() { } Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> getMetadataMappers() { + final MappingParserContext mappingParserContext = parserContext(); final DocumentMapper existingMapper = mapper; final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers = mapperRegistry.getMetadataMapperParsers( indexSettings.getIndexVersionCreated() @@ -216,7 +216,7 @@ Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> getMetadataMapper Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = new LinkedHashMap<>(); if (existingMapper == null) { for (MetadataFieldMapper.TypeParser parser : metadataMapperParsers.values()) { - MetadataFieldMapper metadataFieldMapper = parser.getDefault(parserContext()); + MetadataFieldMapper metadataFieldMapper = parser.getDefault(mappingParserContext); // A
MetadataFieldMapper may choose to not be added to the metadata mappers // of an index (eg TimeSeriesIdFieldMapper is only added to time series indices) // In this case its TypeParser will return null instead of the MetadataFieldMapper diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java index c6b780a94c552..9cfe6ea1c5410 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java @@ -25,18 +25,18 @@ * Parser for {@link Mapping} provided in {@link CompressedXContent} format */ public final class MappingParser { - private final MappingParserContext mappingParserContext; + private final Supplier<MappingParserContext> mappingParserContextSupplier; private final Supplier<Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper>> metadataMappersSupplier; private final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers; private final Function<String, String> documentTypeResolver; MappingParser( - MappingParserContext mappingParserContext, + Supplier<MappingParserContext> mappingParserContextSupplier, Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers, Supplier<Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper>> metadataMappersSupplier, Function<String, String> documentTypeResolver ) { - this.mappingParserContext = mappingParserContext; + this.mappingParserContextSupplier = mappingParserContextSupplier; this.metadataMapperParsers = metadataMapperParsers; this.metadataMappersSupplier = metadataMappersSupplier; this.documentTypeResolver = documentTypeResolver; @@ -97,8 +97,9 @@ Mapping parse(@Nullable String type, CompressedXContent source) throws MapperPar } private Mapping parse(String type, Map<String, Object> mapping) throws MapperParsingException { + final MappingParserContext mappingParserContext = mappingParserContextSupplier.get(); - RootObjectMapper.Builder rootObjectMapper = RootObjectMapper.parse(type, mapping, this.mappingParserContext); + RootObjectMapper.Builder rootObjectMapper = RootObjectMapper.parse(type, mapping, mappingParserContext); Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = metadataMappersSupplier.get(); Map<String, Object> meta = null; @@ -118,7 +119,7 @@ private Mapping parse(String type, Map<String, Object> mapping) throws MapperPar } @SuppressWarnings("unchecked") Map<String, Object> fieldNodeMap = (Map<String, Object>) fieldNode; - MetadataFieldMapper metadataFieldMapper = typeParser.parse(fieldName, fieldNodeMap, this.mappingParserContext) + MetadataFieldMapper metadataFieldMapper = typeParser.parse(fieldName, fieldNodeMap, mappingParserContext) .build(MapperBuilderContext.forMetadata()); metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); assert fieldNodeMap.isEmpty(); @@ -147,7 +148,7 @@ private Mapping parse(String type, Map<String, Object> mapping) throws MapperPar */ meta = Collections.unmodifiableMap(new HashMap<>(removed)); } - if (this.mappingParserContext.indexVersionCreated().isLegacyIndexVersion() == false) { + if (mappingParserContext.indexVersionCreated().isLegacyIndexVersion() == false) { // legacy indices are allowed to have extra definitions that we ignore (we will drop them on import) checkNoRemainingFields(mapping, "Root mapping definition has unsupported parameters: "); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParserContext.java index 1776f3a8d96dd..238a115c59d0e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingParserContext.java @@ -37,6 +37,8 @@ public class MappingParserContext { private
final IndexAnalyzers indexAnalyzers; private final IndexSettings indexSettings; private final IdFieldMapper idFieldMapper; + private final long mappingObjectDepthLimit; + private long mappingObjectDepth = 0; public MappingParserContext( Function<String, SimilarityProvider> similarityLookupService, @@ -60,6 +62,7 @@ public MappingParserContext( this.indexAnalyzers = indexAnalyzers; this.indexSettings = indexSettings; this.idFieldMapper = idFieldMapper; + this.mappingObjectDepthLimit = indexSettings.getMappingDepthLimit(); } public IndexAnalyzers getIndexAnalyzers() { @@ -129,6 +132,17 @@ public ScriptCompiler scriptCompiler() { return scriptCompiler; } + void incrementMappingObjectDepth() throws MapperParsingException { + mappingObjectDepth++; + if (mappingObjectDepth > mappingObjectDepthLimit) { + throw new MapperParsingException("Limit of mapping depth [" + mappingObjectDepthLimit + "] has been exceeded"); + } + } + + void decrementMappingObjectDepth() throws MapperParsingException { + mappingObjectDepth--; + } + public MappingParserContext createMultiFieldContext() { return new MultiFieldParserContext(this); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 23fc86c213f75..49d2468bc80d4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -182,7 +182,6 @@ public ObjectMapper build(MapperBuilderContext context) { } public static class TypeParser implements Mapper.TypeParser { - @Override public boolean supportsVersion(Version indexCreatedVersion) { return true; @@ -191,6 +190,7 @@ public boolean supportsVersion(Version indexCreatedVersion) { @Override public Mapper.Builder parse(String name, Map<String, Object> node, MappingParserContext parserContext) throws MapperParsingException { + parserContext.incrementMappingObjectDepth(); // throws MapperParsingException if depth limit is exceeded Explicit<Boolean> subobjects = parseSubobjects(node); ObjectMapper.Builder builder = new Builder(name, subobjects); for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) { @@ -201,6 +201,7 @@ public Mapper.Builder parse(String name, Map<String, Object> node, MappingParser iterator.remove(); } } + parserContext.decrementMappingObjectDepth(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/query/DateRangeIncludingNowQuery.java b/server/src/main/java/org/elasticsearch/index/query/DateRangeIncludingNowQuery.java index 3c1bbefcacca7..449275fecc3c9 100644 --- a/server/src/main/java/org/elasticsearch/index/query/DateRangeIncludingNowQuery.java +++ b/server/src/main/java/org/elasticsearch/index/query/DateRangeIncludingNowQuery.java @@ -8,8 +8,8 @@ package org.elasticsearch.index.query; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; @@ -34,7 +34,7 @@ public Query getQuery() { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { return in; } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java index f92116a483866..84cb6ca4e2a5b 100644 ---
a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.MultiGeoPointValues; @@ -35,7 +36,6 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.GeoPointFieldMapper.GeoPointFieldType; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -217,15 +217,8 @@ private AbstractDistanceScoreFunction parseVariable( return parseDateVariable(parser, context, fieldType, mode); } else if (fieldType instanceof GeoPointFieldType) { return parseGeoVariable(parser, context, fieldType, mode); - } else if (fieldType instanceof NumberFieldMapper.NumberFieldType) { - return parseNumberVariable(parser, context, fieldType, mode); } else { - throw new ParsingException( - parser.getTokenLocation(), - "field [{}] is of type [{}], but only numeric types are supported.", - fieldName, - fieldType - ); + return parseNumberVariable(parser, context, fieldType, mode); } } @@ -267,8 +260,15 @@ private AbstractDistanceScoreFunction parseNumberVariable( DecayFunctionBuilder.ORIGIN ); } - IndexNumericFieldData numericFieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); - return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData, mode); + + IndexFieldData indexFieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); + if (indexFieldData instanceof IndexNumericFieldData numericFieldData) { + return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData, mode); + } else { + throw new IllegalArgumentException( + "field [" + fieldName + "] is of type [" + fieldType + "], but only numeric types are supported." 
+ ); + } } private AbstractDistanceScoreFunction parseGeoVariable( diff --git a/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java b/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java index 67ec03b291548..f798ba6270271 100644 --- a/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.search; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -54,8 +53,8 @@ public ScoreMode getScoreMode() { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query innerRewrite = query.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query innerRewrite = query.rewrite(searcher); if (innerRewrite != query) { // Right now ToParentBlockJoinQuery always rewrites to a ToParentBlockJoinQuery // so the else block will never be used. It is useful in the case that @@ -69,7 +68,7 @@ public Query rewrite(IndexReader reader) throws IOException { return innerRewrite; } } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index d466d0988abfb..782a49ab7228a 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -62,7 +62,9 @@ public GlobalCheckpointSyncAction( actionFilters, Request::new, Request::new, - ThreadPool.Names.MANAGEMENT + ThreadPool.Names.WRITE, + false, + true ); } @@ -77,24 +79,26 @@ protected void shardOperationOnPrimary( IndexShard indexShard, ActionListener> listener ) { - ActionListener.completeWith(listener, () -> { - maybeSyncTranslog(indexShard); - return new PrimaryResult<>(request, new ReplicationResponse()); - }); + maybeSyncTranslog(indexShard, listener.map(v -> new PrimaryResult<>(request, new ReplicationResponse()))); } @Override protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, ActionListener listener) { - ActionListener.completeWith(listener, () -> { - maybeSyncTranslog(replica); - return new ReplicaResult(); - }); + maybeSyncTranslog(replica, listener.map(v -> new ReplicaResult())); } - private static void maybeSyncTranslog(final IndexShard indexShard) throws IOException { + private static void maybeSyncTranslog(IndexShard indexShard, ActionListener listener) { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint()) { - indexShard.sync(); + indexShard.syncGlobalCheckpoint(indexShard.getLastKnownGlobalCheckpoint(), e -> { + if (e == null) { + listener.onResponse(null); + } else { + listener.onFailure(e); + } + }); + } else { + listener.onResponse(null); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointSyncer.java new file mode 100644 index 0000000000000..7b598794a41db --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointSyncer.java @@ -0,0 +1,25 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.shard; + +public interface GlobalCheckpointSyncer { + /** + * Synchronize the global checkpoints across the replication group. This is used when indexing traffic stops and the primary's global + * checkpoint reaches the max seqno, because in this state the replicas will have an older global checkpoint as carried by the earlier + * indexing traffic, and may not receive any further updates without the explicit sync that this method triggers. + *
<p>
+ * It's also used if {@link org.elasticsearch.index.translog.Translog.Durability#ASYNC} is selected, because in that case indexing + * traffic does not advance the persisted global checkpoint. + *
<p>
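+ * Since this is a single-method interface, an implementation can be a lambda or a method reference. A no-op syncer for a
+ * unit test might look like this (illustrative only, not part of this change):
+ * <pre>{@code
+ * GlobalCheckpointSyncer noopSyncer = shardId -> {};
+ * }</pre>
+ * <p>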
+ * In production this triggers a {@link org.elasticsearch.index.seqno.GlobalCheckpointSyncAction}. + * + * @param shardId The ID of the shard to synchronize. + */ + void syncGlobalCheckpoints(ShardId shardId); +} diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 8392bb118e351..fd53b07e04013 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -231,11 +231,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final EngineFactory engineFactory; private final IndexingOperationListener indexingOperationListeners; - private final Runnable globalCheckpointSyncer; - - Runnable getGlobalCheckpointSyncer() { - return globalCheckpointSyncer; - } + private final GlobalCheckpointSyncer globalCheckpointSyncer; private final RetentionLeaseSyncer retentionLeaseSyncer; @@ -307,7 +303,7 @@ public IndexShard( final Engine.Warmer warmer, final List searchOperationListener, final List listeners, - final Runnable globalCheckpointSyncer, + final GlobalCheckpointSyncer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService, final IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier, @@ -2749,12 +2745,21 @@ public void maybeSyncGlobalCheckpoint(final String reason) { || trackedGlobalCheckpointsNeedSync; // only sync if index is not closed and there is a shard lagging the primary if (syncNeeded && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN) { - logger.trace("syncing global checkpoint for [{}]", reason); - globalCheckpointSyncer.run(); + syncGlobalCheckpoints(reason); } } } + private void syncGlobalCheckpoints(String reason) { + logger.trace("syncing global checkpoint for [{}]", reason); + globalCheckpointSyncer.syncGlobalCheckpoints(shardId); + } + + // exposed for tests + GlobalCheckpointSyncer getGlobalCheckpointSyncer() { + return globalCheckpointSyncer; + } + /** * Returns the current replication group for the shard. * @@ -3610,6 +3615,17 @@ public final void sync(Translog.Location location, Consumer syncListe getEngine().asyncEnsureTranslogSynced(location, syncListener); } + /** + * This method provides the same behavior as #sync but for persisting the global checkpoint. It will initiate a sync + * if the request global checkpoint is greater than the currently persisted global checkpoint. However, same as #sync it + * will not ensure that the request global checkpoint is available to be synced. It is the caller's duty to only call this + * method with a valid processed global checkpoint that is available to sync. 
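+     * <p>
+     * A caller sketch (illustrative only; assumes a {@code logger} is in scope):
+     * <pre>{@code
+     * // request a sync up to the currently known global checkpoint; the listener receives null on success
+     * shard.syncGlobalCheckpoint(shard.getLastKnownGlobalCheckpoint(), e -> {
+     *     if (e != null) {
+     *         logger.warn("global checkpoint sync failed", e);
+     *     }
+     * });
+     * }</pre>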
+ */ + public void syncGlobalCheckpoint(long globalCheckpoint, Consumer syncListener) { + verifyNotClosed(); + getEngine().asyncEnsureGlobalCheckpointSynced(globalCheckpoint, syncListener); + } + public void sync() throws IOException { verifyNotClosed(); getEngine().syncTranslog(); diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 5010c281a753d..41adb860817f1 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.snapshots; +import org.elasticsearch.common.Strings; import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.snapshots.AbortedSnapshotException; @@ -61,7 +62,6 @@ public enum Stage { private long totalSize; private long incrementalSize; private long processedSize; - private long indexVersion; private String failure; private IndexShardSnapshotStatus( @@ -115,18 +115,20 @@ public synchronized Copy moveToStarted( return asCopy(); } - public synchronized Copy moveToFinalize(final long indexVersion) { - if (stage.compareAndSet(Stage.STARTED, Stage.FINALIZE)) { - this.indexVersion = indexVersion; - } else if (isAborted()) { - throw new AbortedSnapshotException(); - } else { - assert false : "Should not try to move stage [" + stage.get() + "] to [FINALIZE]"; - throw new IllegalStateException( - "Unable to move the shard snapshot status to [FINALIZE]: " + "expecting [STARTED] but got [" + stage.get() + "]" - ); - } - return asCopy(); + public synchronized Copy moveToFinalize() { + final var prevStage = stage.compareAndExchange(Stage.STARTED, Stage.FINALIZE); + return switch (prevStage) { + case STARTED -> asCopy(); + case ABORTED -> throw new AbortedSnapshotException(); + default -> { + final var message = Strings.format( + "Unable to move the shard snapshot status to [FINALIZE]: expecting [STARTED] but got [%s]", + prevStage + ); + assert false : message; + throw new IllegalStateException(message); + } + }; } public synchronized void moveToDone(final long endTime, final ShardSnapshotResult shardSnapshotResult) { @@ -206,7 +208,6 @@ public synchronized IndexShardSnapshotStatus.Copy asCopy() { incrementalSize, totalSize, processedSize, - indexVersion, failure ); } @@ -262,7 +263,6 @@ public static class Copy { private final long totalSize; private final long processedSize; private final long incrementalSize; - private final long indexVersion; private final String failure; public Copy( @@ -275,7 +275,6 @@ public Copy( final long incrementalSize, final long totalSize, final long processedSize, - final long indexVersion, final String failure ) { this.stage = stage; @@ -287,7 +286,6 @@ public Copy( this.totalSize = totalSize; this.processedSize = processedSize; this.incrementalSize = incrementalSize; - this.indexVersion = indexVersion; this.failure = failure; } @@ -327,10 +325,6 @@ public long getProcessedSize() { return processedSize; } - public long getIndexVersion() { - return indexVersion; - } - public String getFailure() { return failure; } @@ -356,8 +350,6 @@ public String toString() { + totalSize + ", processedSize=" + processedSize - + ", indexVersion=" - + indexVersion + ", failure='" + failure + '\'' diff --git 
a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index baf0c9ad08059..543fe09c91aeb 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -360,8 +360,6 @@ public String toString() { */ private final String snapshot; - private final long indexVersion; - private final long startTime; private final long time; @@ -376,7 +374,6 @@ public String toString() { * Constructs new shard snapshot metadata from snapshot metadata * * @param snapshot snapshot name - * @param indexVersion index version * @param indexFiles list of files in the shard * @param startTime snapshot start time * @param time snapshot running time @@ -385,7 +382,6 @@ public String toString() { */ public BlobStoreIndexShardSnapshot( String snapshot, - long indexVersion, List indexFiles, long startTime, long time, @@ -393,9 +389,7 @@ public BlobStoreIndexShardSnapshot( long incrementalSize ) { assert snapshot != null; - assert indexVersion >= 0; this.snapshot = snapshot; - this.indexVersion = indexVersion; this.indexFiles = List.copyOf(indexFiles); this.startTime = startTime; this.time = time; @@ -412,7 +406,7 @@ public BlobStoreIndexShardSnapshot( * @param time time it took to create the clone */ public BlobStoreIndexShardSnapshot asClone(String targetSnapshotName, long startTime, long time) { - return new BlobStoreIndexShardSnapshot(targetSnapshotName, indexVersion, indexFiles, startTime, time, 0, 0); + return new BlobStoreIndexShardSnapshot(targetSnapshotName, indexFiles, startTime, time, 0, 0); } /** @@ -480,7 +474,6 @@ public static long totalSize(List indexFiles) { } private static final String NAME = "name"; - private static final String INDEX_VERSION = "index_version"; private static final String START_TIME = "start_time"; private static final String TIME = "time"; private static final String FILES = "files"; @@ -490,13 +483,16 @@ public static long totalSize(List indexFiles) { private static final String INCREMENTAL_SIZE = "total_size"; private static final ParseField PARSE_NAME = new ParseField(NAME); - private static final ParseField PARSE_INDEX_VERSION = new ParseField(INDEX_VERSION, "index-version"); private static final ParseField PARSE_START_TIME = new ParseField(START_TIME); private static final ParseField PARSE_TIME = new ParseField(TIME); private static final ParseField PARSE_INCREMENTAL_FILE_COUNT = new ParseField(INCREMENTAL_FILE_COUNT); private static final ParseField PARSE_INCREMENTAL_SIZE = new ParseField(INCREMENTAL_SIZE); private static final ParseField PARSE_FILES = new ParseField(FILES); + // pre-8.9.0 versions included this (unused) field so we must accept its existence + private static final String INDEX_VERSION = "index_version"; + private static final ParseField PARSE_INDEX_VERSION = new ParseField(INDEX_VERSION, "index-version"); + /** * Serializes shard snapshot metadata info into JSON * @@ -506,7 +502,7 @@ public static long totalSize(List indexFiles) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(NAME, snapshot); - builder.field(INDEX_VERSION, indexVersion); + builder.field(INDEX_VERSION, 0); // pre-8.9.0 versions require this field to be present and non-negative builder.field(START_TIME, startTime); 
builder.field(TIME, time); builder.field(INCREMENTAL_FILE_COUNT, incrementalFileCount); @@ -527,7 +523,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws */ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) throws IOException { String snapshot = null; - long indexVersion = -1; long startTime = 0; long time = 0; int incrementalFileCount = 0; @@ -547,8 +542,8 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th if (PARSE_NAME.match(currentFieldName, parser.getDeprecationHandler())) { snapshot = parser.text(); } else if (PARSE_INDEX_VERSION.match(currentFieldName, parser.getDeprecationHandler())) { - // The index-version is needed for backward compatibility with v 1.0 - indexVersion = parser.longValue(); + // pre-8.9.0 versions included this (unused) field so we must accept its existence + parser.longValue(); } else if (PARSE_START_TIME.match(currentFieldName, parser.getDeprecationHandler())) { startTime = parser.longValue(); } else if (PARSE_TIME.match(currentFieldName, parser.getDeprecationHandler())) { @@ -573,7 +568,6 @@ public static BlobStoreIndexShardSnapshot fromXContent(XContentParser parser) th return new BlobStoreIndexShardSnapshot( snapshot, - indexVersion, indexFiles == null ? List.of() : indexFiles, startTime, time, diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 011b68b38a9c1..8759f1cf4b9d1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -51,7 +51,6 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; -import java.util.Optional; import java.util.OptionalLong; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; @@ -839,15 +838,18 @@ public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException { } /** - * Ensures that the given location has be synced / written to the underlying storage. + * Ensures that the given location and global checkpoint has be synced / written to the underlying storage. * * @return Returns true iff this call caused an actual sync operation otherwise false */ - public boolean ensureSynced(Location location) throws IOException { + public boolean ensureSynced(Location location, long globalCheckpoint) throws IOException { try (ReleasableLock lock = readLock.acquire()) { - if (location.generation == current.getGeneration()) { // if we have a new one it's already synced + // if we have a new generation and the persisted global checkpoint is greater than or equal to the sync global checkpoint it's + // already synced + long persistedGlobalCheckpoint = current.getLastSyncedCheckpoint().globalCheckpoint; + if (location.generation == current.getGeneration() || persistedGlobalCheckpoint < globalCheckpoint) { ensureOpen(); - return current.syncUpTo(location.translogLocation + location.size); + return current.syncUpTo(location.translogLocation + location.size, globalCheckpoint); } } catch (final Exception ex) { closeOnTragicEvent(ex); @@ -856,24 +858,6 @@ public boolean ensureSynced(Location location) throws IOException { return false; } - /** - * Ensures that all locations in the given stream have been synced / written to the underlying storage. 
- * This method allows for internal optimization to minimize the amount of fsync operations if multiple - * locations must be synced. - * - * @return Returns true iff this call caused an actual sync operation otherwise false - */ - public boolean ensureSynced(Stream locations) throws IOException { - final Optional max = locations.max(Location::compareTo); - // we only need to sync the max location since it will sync all other - // locations implicitly - if (max.isPresent()) { - return ensureSynced(max.get()); - } else { - return false; - } - } - /** * Closes the translog if the current translog writer experienced a tragic exception. * @@ -929,6 +913,8 @@ public TranslogDeletionPolicy getDeletionPolicy() { public static class Location implements Comparable { + public static Location EMPTY = new Location(0, 0, 0); + public final long generation; public final long translogLocation; public final int size; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index c390ace777d3b..c3715e17efae8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -346,7 +346,7 @@ synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) { * raising the exception. */ public void sync() throws IOException { - syncUpTo(Long.MAX_VALUE); + syncUpTo(Long.MAX_VALUE, SequenceNumbers.UNASSIGNED_SEQ_NO); } /** @@ -462,10 +462,17 @@ private long getWrittenOffset() throws IOException { * * @return true if this call caused an actual sync operation */ - final boolean syncUpTo(long offset) throws IOException { - if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { + final boolean syncUpTo(long offset, long globalCheckpointToPersist) throws IOException { + if ((lastSyncedCheckpoint.offset < offset || lastSyncedCheckpoint.globalCheckpoint < globalCheckpointToPersist) && syncNeeded()) { + assert globalCheckpointToPersist <= globalCheckpointSupplier.getAsLong() + : "globalCheckpointToPersist [" + + globalCheckpointToPersist + + "] greater than global checkpoint [" + + globalCheckpointSupplier.getAsLong() + + "]"; synchronized (syncLock) { // only one sync/checkpoint should happen concurrently but we wait - if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { + if ((lastSyncedCheckpoint.offset < offset || lastSyncedCheckpoint.globalCheckpoint < globalCheckpointToPersist) + && syncNeeded()) { // double checked locking - we don't want to fsync unless we have to and now that we have // the lock we should check again since if this code is busy we might have fsynced enough already final Checkpoint checkpointToSync; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 600623763dac3..4a4d40b7516d8 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -108,6 +108,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseStats; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.GlobalCheckpointSyncer; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -848,7 +849,7 @@ public 
IndexShard createShard( final PeerRecoveryTargetService.RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, - final Consumer globalCheckpointSyncer, + final GlobalCheckpointSyncer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 30d23f7321594..a81be7fb037f8 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; +import org.elasticsearch.index.shard.GlobalCheckpointSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; @@ -1097,7 +1098,7 @@ T createShard( PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, Consumer onShardFailure, - Consumer globalCheckpointSyncer, + GlobalCheckpointSyncer globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index d6ef685b7a0c3..d26cf79594425 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -356,6 +356,11 @@ public Releasable tryAcquireSnapshotDownloadPermits() { return recoverySettings.tryAcquireSnapshotDownloadPermits(); } + // Visible for testing + public int ongoingRecoveryCount() { + return onGoingRecoveries.size(); + } + /** * Prepare the start recovery request. 
* diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 7a0e6985daacb..8ed5f78009e1e 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -37,6 +37,7 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING; import static org.elasticsearch.common.settings.Setting.parseInt; import static org.elasticsearch.common.unit.ByteSizeValue.ofBytes; import static org.elasticsearch.core.Strings.format; @@ -393,6 +394,7 @@ public Iterator> settings() { private final boolean nodeBandwidthSettingsExist; private volatile int maxConcurrentSnapshotFileDownloads; private volatile int maxConcurrentSnapshotFileDownloadsPerNode; + private volatile int maxConcurrentIncomingRecoveries; private final AdjustableSemaphore maxSnapshotFileDownloadsPerNodeSemaphore; @@ -417,6 +419,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.useSnapshotsDuringRecovery = INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.get(settings); this.maxConcurrentSnapshotFileDownloads = INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.get(settings); this.maxConcurrentSnapshotFileDownloadsPerNode = INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.get(settings); + this.maxConcurrentIncomingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.get(settings); this.maxSnapshotFileDownloadsPerNodeSemaphore = new AdjustableSemaphore(this.maxConcurrentSnapshotFileDownloadsPerNode, true); this.availableNetworkBandwidth = NODE_BANDWIDTH_RECOVERY_NETWORK_SETTING.get(settings); this.availableDiskReadBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_READ_SETTING.get(settings); @@ -466,6 +469,10 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE, this::setMaxConcurrentSnapshotFileDownloadsPerNode ); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, + this::setMaxConcurrentIncomingRecoveries + ); } private void computeMaxBytesPerSec(Settings settings) { @@ -652,6 +659,10 @@ public void setMaxConcurrentSnapshotFileDownloads(int maxConcurrentSnapshotFileD this.maxConcurrentSnapshotFileDownloads = maxConcurrentSnapshotFileDownloads; } + private void setMaxConcurrentIncomingRecoveries(int maxConcurrentIncomingRecoveries) { + this.maxConcurrentIncomingRecoveries = maxConcurrentIncomingRecoveries; + } + private void setMaxConcurrentSnapshotFileDownloadsPerNode(int maxConcurrentSnapshotFileDownloadsPerNode) { this.maxConcurrentSnapshotFileDownloadsPerNode = maxConcurrentSnapshotFileDownloadsPerNode; this.maxSnapshotFileDownloadsPerNodeSemaphore.setMaxPermits(maxConcurrentSnapshotFileDownloadsPerNode); @@ -666,16 +677,39 @@ Releasable tryAcquireSnapshotDownloadPermits() { final int maxConcurrentSnapshotFileDownloads = getMaxConcurrentSnapshotFileDownloads(); final boolean permitAcquired = maxSnapshotFileDownloadsPerNodeSemaphore.tryAcquire(maxConcurrentSnapshotFileDownloads); if (permitAcquired == false) { - logger.warn( - String.format( - Locale.ROOT, - "Unable to acquire permit to use snapshot files during recovery, " - + "this 
recovery will recover index files from the source node. " - + "Ensure snapshot files can be used during recovery by setting [%s] to be no greater than [%d]", - INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), - this.maxConcurrentSnapshotFileDownloadsPerNode - ) - ); + if (this.maxConcurrentIncomingRecoveries <= this.maxConcurrentSnapshotFileDownloadsPerNode) { + logger.warn( + String.format( + Locale.ROOT, + """ + Unable to acquire permit to use snapshot files during recovery, so this recovery will recover index files from \ + the source node. Ensure snapshot files can be used during recovery by setting [%s] to be no greater than [%d]. \ + Current values of [%s] = [%d], [%s] = [%d] + """, + INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), + this.maxConcurrentSnapshotFileDownloadsPerNode / Math.max(1, this.maxConcurrentIncomingRecoveries), + INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), + this.maxConcurrentSnapshotFileDownloadsPerNode, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), + this.maxConcurrentIncomingRecoveries + ) + ); + } else { + logger.warn( + String.format( + Locale.ROOT, + """ + Unable to acquire permit to use snapshot files during recovery, so this recovery will recover index files from \ + the source node. Ensure snapshot files can be used during recovery by reducing [%s] from its current value of \ + [%d] to be no greater than [%d], or disable snapshot-based recovery by setting [%s] to [false] + """, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), + this.maxConcurrentIncomingRecoveries, + this.maxConcurrentSnapshotFileDownloadsPerNode, + INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey() + ) + ); + } return null; } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 19f0a42deb1f1..456d988cd6364 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.repositories.IndexId; +import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.file.Path; @@ -586,7 +587,19 @@ public void restoreFileFromSnapshot( ) { StoreFileMetadata metadata = fileInfo.metadata(); int readSnapshotFileBufferSize = snapshotFilesProvider.getReadSnapshotFileBufferSizeForRepo(repository); - multiFileWriter.writeFile(metadata, readSnapshotFileBufferSize, inputStream); + multiFileWriter.writeFile(metadata, readSnapshotFileBufferSize, new FilterInputStream(inputStream) { + @Override + public int read() throws IOException { + cancellableThreads.checkForCancel(); + return super.read(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + cancellableThreads.checkForCancel(); + return super.read(b, off, len); + } + }); listener.onResponse(null); } catch (Exception e) { logger.debug(() -> format("Unable to recover snapshot file %s from repository %s", fileInfo, repository), e); diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java index 3e5826bc0f6d2..96c272d6bc6c6 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestStats.java +++ 
b/server/src/main/java/org/elasticsearch/ingest/IngestStats.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -34,6 +35,19 @@ public record IngestStats(Stats totalStats, List pipelineStats, Ma Writeable, ChunkedToXContent { + private static final Comparator PIPELINE_STAT_COMPARATOR = (p1, p2) -> { + final Stats p2Stats = p2.stats; + final Stats p1Stats = p1.stats; + final int ingestTimeCompare = Long.compare(p2Stats.ingestTimeInMillis, p1Stats.ingestTimeInMillis); + if (ingestTimeCompare == 0) { + return Long.compare(p2Stats.ingestCount, p1Stats.ingestCount); + } else { + return ingestTimeCompare; + } + }; + + public static final IngestStats IDENTITY = new IngestStats(Stats.IDENTITY, List.of(), Map.of()); + /** * @param totalStats - The total stats for Ingest. This is logically the sum of all pipeline stats, * and pipeline stats are logically the sum of the processor stats. @@ -41,16 +55,7 @@ public record IngestStats(Stats totalStats, List pipelineStats, Ma * @param processorStats - The per-processor stats for a given pipeline. A map keyed by the pipeline identifier. */ public IngestStats { - pipelineStats = pipelineStats.stream().sorted((p1, p2) -> { - final IngestStats.Stats p2Stats = p2.stats; - final IngestStats.Stats p1Stats = p1.stats; - final int ingestTimeCompare = Long.compare(p2Stats.ingestTimeInMillis, p1Stats.ingestTimeInMillis); - if (ingestTimeCompare == 0) { - return Long.compare(p2Stats.ingestCount, p1Stats.ingestCount); - } else { - return ingestTimeCompare; - } - }).toList(); + pipelineStats = pipelineStats.stream().sorted(PIPELINE_STAT_COMPARATOR).toList(); } /** @@ -153,11 +158,30 @@ public Iterator toXContentChunked(ToXContent.Params outerP ); } + public static IngestStats merge(IngestStats first, IngestStats second) { + return new IngestStats( + Stats.merge(first.totalStats, second.totalStats), + PipelineStat.merge(first.pipelineStats, second.pipelineStats), + merge(first.processorStats, second.processorStats) + ); + } + + static Map> merge(Map> first, Map> second) { + var totalsPerPipelineProcessor = new HashMap>(); + + first.forEach((pipelineId, stats) -> totalsPerPipelineProcessor.merge(pipelineId, stats, ProcessorStat::merge)); + second.forEach((pipelineId, stats) -> totalsPerPipelineProcessor.merge(pipelineId, stats, ProcessorStat::merge)); + + return totalsPerPipelineProcessor; + } + public record Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) implements Writeable, ToXContentFragment { + public static final Stats IDENTITY = new Stats(0, 0, 0, 0); + /** * Read from a stream. */ @@ -181,6 +205,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("failed", ingestFailedCount); return builder; } + + static Stats merge(Stats first, Stats second) { + return new Stats( + first.ingestCount + second.ingestCount, + first.ingestTimeInMillis + second.ingestTimeInMillis, + first.ingestCurrent + second.ingestCurrent, + first.ingestFailedCount + second.ingestFailedCount + ); + } } /** @@ -216,10 +249,34 @@ IngestStats build() { /** * Container for pipeline stats. 
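+     * <p>
+     * Illustrative merge arithmetic (hypothetical numbers): merging {@code p1=10ms} with {@code p1=5ms, p2=1ms} yields
+     * {@code p1=15ms, p2=1ms}, since per-pipeline totals are summed via {@code Stats.merge} and the merged list is re-sorted
+     * by descending ingest time.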
*/ - public record PipelineStat(String pipelineId, Stats stats) {} + public record PipelineStat(String pipelineId, Stats stats) { + static List merge(List first, List second) { + var totalsPerPipeline = new HashMap(); + + first.forEach(ps -> totalsPerPipeline.merge(ps.pipelineId, ps.stats, Stats::merge)); + second.forEach(ps -> totalsPerPipeline.merge(ps.pipelineId, ps.stats, Stats::merge)); + + return totalsPerPipeline.entrySet() + .stream() + .map(v -> new PipelineStat(v.getKey(), v.getValue())) + .sorted(PIPELINE_STAT_COMPARATOR) + .toList(); + } + } /** * Container for processor stats. */ - public record ProcessorStat(String name, String type, Stats stats) {} + public record ProcessorStat(String name, String type, Stats stats) { + + // The list of ProcessorStats has *always* stats for each processor (even if processor was executed or not), so it's safe to zip + // both lists using a common index iterator. + private static List merge(List first, List second) { + var merged = new ArrayList(); + for (var i = 0; i < first.size(); i++) { + merged.add(new ProcessorStat(first.get(i).name, first.get(i).type, Stats.merge(first.get(i).stats, second.get(i).stats))); + } + return merged; + } + } } diff --git a/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java index 20cc5b411b63b..a49f02acf4c4d 100644 --- a/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java @@ -16,6 +16,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.TermQuery; @@ -67,11 +68,12 @@ public BlendedTermQuery(Term[] terms, float[] boosts) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } + IndexReader reader = searcher.getIndexReader(); IndexReaderContext context = reader.getContext(); TermStates[] ctx = new TermStates[terms.length]; int[] docFreqs = new int[ctx.length]; diff --git a/server/src/main/java/org/elasticsearch/lucene/queries/MinDocQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/MinDocQuery.java index b65762374ebca..0e2f05cc12e4f 100644 --- a/server/src/main/java/org/elasticsearch/lucene/queries/MinDocQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/MinDocQuery.java @@ -59,7 +59,8 @@ public boolean equals(Object obj) { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { + IndexReader reader = searcher.getIndexReader(); if (Objects.equals(reader.getContext().id(), readerId) == false) { return new MinDocQuery(minDoc, reader.getContext().id()); } diff --git a/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java index a15cf181fd584..7e1478f28dddc 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java +++ 
b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -14,6 +14,7 @@ import org.apache.lucene.sandbox.search.CombinedFieldQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -45,43 +46,43 @@ public CustomFieldQuery(Query query, IndexReader reader, boolean phraseHighlight } @Override - protected void flatten(Query sourceQuery, IndexReader reader, Collection flatQueries, float boost) throws IOException { + protected void flatten(Query sourceQuery, IndexSearcher searcher, Collection flatQueries, float boost) throws IOException { if (sourceQuery instanceof BoostQuery bq) { sourceQuery = bq.getQuery(); boost *= bq.getBoost(); - flatten(sourceQuery, reader, flatQueries, boost); + flatten(sourceQuery, searcher, flatQueries, boost); } else if (sourceQuery instanceof SpanTermQuery) { - super.flatten(new TermQuery(((SpanTermQuery) sourceQuery).getTerm()), reader, flatQueries, boost); + super.flatten(new TermQuery(((SpanTermQuery) sourceQuery).getTerm()), searcher, flatQueries, boost); } else if (sourceQuery instanceof ConstantScoreQuery) { - flatten(((ConstantScoreQuery) sourceQuery).getQuery(), reader, flatQueries, boost); + flatten(((ConstantScoreQuery) sourceQuery).getQuery(), searcher, flatQueries, boost); } else if (sourceQuery instanceof FunctionScoreQuery) { - flatten(((FunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost); + flatten(((FunctionScoreQuery) sourceQuery).getSubQuery(), searcher, flatQueries, boost); } else if (sourceQuery instanceof MultiPhrasePrefixQuery) { - flatten(sourceQuery.rewrite(reader), reader, flatQueries, boost); + flatten(sourceQuery.rewrite(searcher), searcher, flatQueries, boost); } else if (sourceQuery instanceof MultiPhraseQuery q) { - convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); + convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), searcher, flatQueries); } else if (sourceQuery instanceof BlendedTermQuery blendedTermQuery) { - flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost); + flatten(blendedTermQuery.rewrite(searcher), searcher, flatQueries, boost); } else if (sourceQuery instanceof org.apache.lucene.queries.function.FunctionScoreQuery funcScoreQuery) { // flatten query with query boost - flatten(funcScoreQuery.getWrappedQuery(), reader, flatQueries, boost); + flatten(funcScoreQuery.getWrappedQuery(), searcher, flatQueries, boost); } else if (sourceQuery instanceof SynonymQuery synQuery) { // SynonymQuery should be handled by the parent class directly. // This statement should be removed when https://issues.apache.org/jira/browse/LUCENE-7484 is merged. 
for (Term term : synQuery.getTerms()) { - flatten(new TermQuery(term), reader, flatQueries, boost); + flatten(new TermQuery(term), searcher, flatQueries, boost); } } else if (sourceQuery instanceof CombinedFieldQuery combinedFieldQuery) { for (Term term : combinedFieldQuery.getTerms()) { - flatten(new TermQuery(term), reader, flatQueries, boost); + flatten(new TermQuery(term), searcher, flatQueries, boost); } } else if (sourceQuery instanceof ESToParentBlockJoinQuery) { Query childQuery = ((ESToParentBlockJoinQuery) sourceQuery).getChildQuery(); if (childQuery != null) { - flatten(childQuery, reader, flatQueries, boost); + flatten(childQuery, searcher, flatQueries, boost); } } else { - super.flatten(sourceQuery, reader, flatQueries, boost); + super.flatten(sourceQuery, searcher, flatQueries, boost); } } @@ -91,7 +92,7 @@ private void convertMultiPhraseQuery( MultiPhraseQuery orig, Term[][] terms, int[] pos, - IndexReader reader, + IndexSearcher searcher, Collection flatQueries ) throws IOException { if (currentPos == 0) { @@ -103,7 +104,7 @@ private void convertMultiPhraseQuery( if (numTerms > 16) { for (Term[] currentPosTerm : terms) { for (Term term : currentPosTerm) { - super.flatten(new TermQuery(term), reader, flatQueries, 1F); + super.flatten(new TermQuery(term), searcher, flatQueries, 1F); } } return; @@ -120,12 +121,12 @@ private void convertMultiPhraseQuery( queryBuilder.add(terms[i][termsIdx[i]], pos[i]); } Query query = queryBuilder.build(); - this.flatten(query, reader, flatQueries, 1F); + this.flatten(query, searcher, flatQueries, 1F); } else { Term[] t = terms[currentPos]; for (int i = 0; i < t.length; i++) { termsIdx[currentPos] = i; - convertMultiPhraseQuery(currentPos + 1, termsIdx, orig, terms, pos, reader, flatQueries); + convertMultiPhraseQuery(currentPos + 1, termsIdx, orig, terms, pos, searcher, flatQueries); } } } diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 357f5f0716237..e02a3a40b77ef 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -313,7 +313,9 @@ String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long cu ); case CPU -> { double percentCpu = getTimeSharePercentage(topThread.getCpuTime()); - double percentOther = getTimeSharePercentage(topThread.getOtherTime()); + double percentOther = Transports.isTransportThread(threadName) && topThread.getCpuTime() == 0L + ? 100.0 + : getTimeSharePercentage(topThread.getOtherTime()); double percentTotal = (Transports.isTransportThread(threadName)) ? percentCpu : percentOther + percentCpu; String otherLabel = (Transports.isTransportThread(threadName)) ? 
"idle" : "other"; sb.append( diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index d7a8c6cccd247..e2ca66f3835a9 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -344,7 +343,7 @@ private Assignment createAssignment( // leaving the cluster final List candidateNodes = currentState.nodes() .stream() - .filter(dn -> NodesShutdownMetadata.isNodeShuttingDown(currentState, dn.getId()) == false) + .filter(dn -> currentState.metadata().nodeShutdowns().contains(dn.getId()) == false) .collect(Collectors.toCollection(ArrayList::new)); // Task assignment should not rely on node order Randomness.shuffle(candidateNodes); @@ -352,7 +351,7 @@ private Assignment createAssignment( final Assignment assignment = persistentTasksExecutor.getAssignment(taskParams, candidateNodes, currentState); assert assignment != null : "getAssignment() should always return an Assignment object, containing a node or a reason why not"; assert (assignment.getExecutorNode() == null - || NodesShutdownMetadata.isNodeShuttingDown(currentState, assignment.getExecutorNode()) == false) + || currentState.metadata().nodeShutdowns().contains(assignment.getExecutorNode()) == false) : "expected task [" + taskName + "] to be assigned to a node that is not marked as shutting down, but " diff --git a/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java b/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java new file mode 100644 index 0000000000000..a4f2dccca243b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories; + +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.common.util.concurrent.RunOnce; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.engine.Engine; + +/** + * A (closeable) {@link IndexCommit} plus ref-counting to keep track of active users, and with the facility to drop the "main" initial ref + * early if the shard snapshot is aborted. 
+ */ +public class SnapshotIndexCommit extends AbstractRefCounted { + + private final Engine.IndexCommitRef commitRef; + private final Runnable releaseInitialRef; + @Nullable + private Exception closeException; + + public SnapshotIndexCommit(Engine.IndexCommitRef commitRef) { + this.commitRef = commitRef; + this.releaseInitialRef = new RunOnce(this::decRef); + } + + @Override + protected void closeInternal() { + assert closeException == null : closeException; + try { + commitRef.close(); + } catch (Exception e) { + closeException = e; + } + } + + /** + * Called after all other refs are released, to release the initial ref (if not already released) and re-throw any exception thrown + * when the inner {@link IndexCommit} was closed. + */ + public void onCompletion() throws Exception { + releaseInitialRef.run(); + assert hasReferences() == false; + // closeInternal happens-before here so no need for synchronization + if (closeException != null) { + throw closeException; + } + } + + /** + * Called to abort the snapshot while it's running: release the initial ref (if not already released). + */ + public void onAbort() { + releaseInitialRef.run(); + } + + public IndexCommit indexCommit() { + assert hasReferences(); + return commitRef.getIndexCommit(); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/SnapshotShardContext.java b/server/src/main/java/org/elasticsearch/repositories/SnapshotShardContext.java index bc26af67a3a32..62b8eab26550f 100644 --- a/server/src/main/java/org/elasticsearch/repositories/SnapshotShardContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/SnapshotShardContext.java @@ -13,8 +13,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.snapshots.SnapshotId; @@ -30,7 +32,7 @@ public final class SnapshotShardContext extends DelegatingActionListener listener ) { - super(ActionListener.runBefore(listener, commitRef::close)); + super(ActionListener.runBefore(listener, commitRef::onCompletion)); this.store = store; this.mapperService = mapperService; this.snapshotId = snapshotId; @@ -93,7 +95,7 @@ public IndexId indexId() { } public IndexCommit indexCommit() { - return commitRef.getIndexCommit(); + return commitRef.indexCommit(); } @Nullable @@ -117,4 +119,15 @@ public long snapshotStartTime() { public void onResponse(ShardSnapshotResult result) { delegate.onResponse(result); } + + public Releasable withCommitRef() { + snapshotStatus.ensureNotAborted(); // check this first to avoid acquiring a ref when aborted even if refs are available + if (commitRef.tryIncRef()) { + return Releasables.releaseOnce(commitRef::decRef); + } else { + snapshotStatus.ensureNotAborted(); + assert false : "commit ref closed early in state " + snapshotStatus; + throw new IndexShardSnapshotFailedException(store.shardId(), "Store got closed concurrently"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 
22bde89852fec..02ecb84ef2388 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2712,7 +2712,7 @@ private void doSnapshotShard(SnapshotShardContext context) { indexCommitPointFiles = new ArrayList<>(); final Collection fileNames; final Store.MetadataSnapshot metadataFromStore; - try (Releasable ignored = incrementStoreRef(store, snapshotStatus, shardId)) { + try (Releasable ignored = context.withCommitRef()) { // TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) here but we should try { logger.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit); @@ -2866,13 +2866,12 @@ private void doSnapshotShard(SnapshotShardContext context) { final StepListener> allFilesUploadedListener = new StepListener<>(); allFilesUploadedListener.whenComplete(v -> { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(); // now create and write the commit point logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); final BlobStoreIndexShardSnapshot blobStoreIndexShardSnapshot = new BlobStoreIndexShardSnapshot( snapshotId.getName(), - lastSnapshotStatus.getIndexVersion(), indexCommitPointFiles, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), @@ -2926,15 +2925,6 @@ protected void snapshotFiles( } } - private static Releasable incrementStoreRef(Store store, IndexShardSnapshotStatus snapshotStatus, ShardId shardId) { - if (store.tryIncRef() == false) { - snapshotStatus.ensureNotAborted(); - assert false : "Store should not be closed concurrently unless snapshot is aborted"; - throw new IndexShardSnapshotFailedException(shardId, "Store got closed concurrently"); - } - return store::decRef; - } - private static boolean assertFileContentsMatchHash( IndexShardSnapshotStatus snapshotStatus, BlobStoreIndexShardSnapshot.FileInfo fileInfo, @@ -3439,7 +3429,7 @@ protected void snapshotFile(SnapshotShardContext context, FileInfo fileInfo) thr final BlobContainer shardContainer = shardContainer(indexId, shardId); final String file = fileInfo.physicalName(); try ( - Releasable ignored = BlobStoreRepository.incrementStoreRef(store, snapshotStatus, store.shardId()); + Releasable ignored = context.withCommitRef(); IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata()) ) { for (int i = 0; i < fileInfo.numberOfParts(); i++) { diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index 8835b8ca1ad61..d6ef6b72c4c8b 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -48,7 +48,7 @@ public class RestRequest implements ToXContent.Params { // tchar pattern as defined by RFC7230 section 3.2.6 - private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+"); + private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+"); private static final AtomicLong requestIdGenerator = new AtomicLong(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java index 99417fbc962b7..cbf8baa9a2ea9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.tasks.TaskId; @@ -49,7 +50,9 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ListTasksRequest listTasksRequest = generateListTasksRequest(request); final String groupBy = request.param("group_by", "nodes"); - return channel -> client.admin().cluster().listTasks(listTasksRequest, listTasksResponseListener(nodesInCluster, groupBy, channel)); + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() + .cluster() + .listTasks(listTasksRequest, listTasksResponseListener(nodesInCluster, groupBy, channel)); } public static ListTasksRequest generateListTasksRequest(RestRequest request) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 78717eff37850..727bd361e5def 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -140,7 +140,7 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR final Set candidates = new HashSet<>(); for (final NodeStats nodeStats : nodesStats.getNodes()) { for (final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { - candidates.add(threadPoolStats.getName()); + candidates.add(threadPoolStats.name()); } } @@ -169,7 +169,7 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR ThreadPoolStats threadPoolStats = stats.getThreadPool(); for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) { - poolThreadStats.put(threadPoolStat.getName(), threadPoolStat); + poolThreadStats.put(threadPoolStat.name(), threadPoolStat); } if (info != null) { for (ThreadPool.Info threadPoolInfo : info.getInfo(ThreadPoolInfo.class)) { @@ -222,13 +222,13 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR table.addCell(entry.getKey()); table.addCell(poolInfo == null ? null : poolInfo.getThreadPoolType().getType()); - table.addCell(poolStats == null ? null : poolStats.getActive()); - table.addCell(poolStats == null ? null : poolStats.getThreads()); - table.addCell(poolStats == null ? null : poolStats.getQueue()); + table.addCell(poolStats == null ? null : poolStats.active()); + table.addCell(poolStats == null ? null : poolStats.threads()); + table.addCell(poolStats == null ? null : poolStats.queue()); table.addCell(maxQueueSize == null ? -1 : maxQueueSize); - table.addCell(poolStats == null ? null : poolStats.getRejected()); - table.addCell(poolStats == null ? null : poolStats.getLargest()); - table.addCell(poolStats == null ? null : poolStats.getCompleted()); + table.addCell(poolStats == null ? null : poolStats.rejected()); + table.addCell(poolStats == null ? 
null : poolStats.largest()); + table.addCell(poolStats == null ? null : poolStats.completed()); table.addCell(core); table.addCell(max); table.addCell(size); diff --git a/server/src/main/java/org/elasticsearch/rest/action/info/AbstractInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/info/AbstractInfoAction.java deleted file mode 100644 index 6520cdc88c296..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/info/AbstractInfoAction.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.rest.action.info; - -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.xcontent.ChunkedToXContent; -import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.ChunkedRestResponseBody; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestResponseListener; - -import java.io.IOException; - -import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; - -public abstract class AbstractInfoAction extends BaseRestHandler { - - public abstract NodesStatsRequest buildNodeStatsRequest(); - - public abstract ChunkedToXContent xContentChunks(NodesStatsResponse nodesStatsResponse); - - @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - return channel -> client.admin().cluster().nodesStats(buildNodeStatsRequest(), new RestResponseListener<>(channel) { - @Override - public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception { - return new RestResponse( - RestStatus.OK, - ChunkedRestResponseBody.fromXContent( - outerParams -> Iterators.concat( - ChunkedToXContentHelper.startObject(), - Iterators.single( - (builder, params) -> builder.field("cluster_name", nodesStatsResponse.getClusterName().value()) - ), - xContentChunks(nodesStatsResponse).toXContentChunked(outerParams), - ChunkedToXContentHelper.endObject() - ), - EMPTY_PARAMS, - channel - ) - ); - } - }); - } -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java b/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java index e0baed20db550..9411de950673a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/info/RestClusterInfoAction.java @@ -8,7 +8,6 @@ package org.elasticsearch.rest.action.info; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -18,11 +17,13 @@ import 
org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.http.HttpStats; +import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.ChunkedRestResponseBody; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestResponseListener; import java.io.IOException; @@ -35,13 +36,21 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import static org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest.Metric.HTTP; +import static org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest.Metric.INGEST; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; public class RestClusterInfoAction extends BaseRestHandler { static final Map<String, Function<NodesStatsResponse, ChunkedToXContent>> RESPONSE_MAPPER = Map.of( - NodesInfoRequest.Metric.HTTP.metricName(), - nodesStatsResponse -> nodesStatsResponse.getNodes().stream().map(NodeStats::getHttp).reduce(HttpStats.IDENTITY, HttpStats::merge) + HTTP.metricName(), + nodesStatsResponse -> nodesStatsResponse.getNodes().stream().map(NodeStats::getHttp).reduce(HttpStats.IDENTITY, HttpStats::merge), + // + INGEST.metricName(), + nodesStatsResponse -> nodesStatsResponse.getNodes() + .stream() + .map(NodeStats::getIngestStats) + .reduce(IngestStats.IDENTITY, IngestStats::merge) ); static final Set<String> AVAILABLE_TARGETS = RESPONSE_MAPPER.keySet(); @@ -84,25 +93,27 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client targets.forEach(nodesStatsRequest::addMetric); } - return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener<>(channel) { - @Override - public RestResponse buildResponse(NodesStatsResponse response) throws Exception { - var chunkedResponses = targets.stream().map(RESPONSE_MAPPER::get).map(mapper -> mapper.apply(response)).iterator(); - - return new RestResponse( - RestStatus.OK, - ChunkedRestResponseBody.fromXContent( - outerParams -> Iterators.concat( - ChunkedToXContentHelper.startObject(), - Iterators.single((builder, params) -> builder.field("cluster_name", response.getClusterName().value())), - Iterators.flatMap(chunkedResponses, chunk -> chunk.toXContentChunked(outerParams)), - ChunkedToXContentHelper.endObject() - ), - EMPTY_PARAMS, - channel - ) - ); - } - }); + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() + .cluster() + .nodesStats(nodesStatsRequest, new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(NodesStatsResponse response) throws Exception { + var chunkedResponses = targets.stream().map(RESPONSE_MAPPER::get).map(mapper -> mapper.apply(response)).iterator(); + + return new RestResponse( + RestStatus.OK, + ChunkedRestResponseBody.fromXContent( + outerParams -> Iterators.concat( + ChunkedToXContentHelper.startObject(), + Iterators.single((builder, params) -> builder.field("cluster_name", response.getClusterName().value())), + Iterators.flatMap(chunkedResponses, chunk -> chunk.toXContentChunked(outerParams)), + ChunkedToXContentHelper.endObject() + ), + EMPTY_PARAMS, + channel + ) + ); + } + }); } }
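A note on the RESPONSE_MAPPER entries above: each target folds per-node stats into a single object via reduce with an identity element (HttpStats.IDENTITY, IngestStats.IDENTITY), which also behaves correctly for an empty node list. A minimal sketch of that reduce-with-identity pattern; the Counts record and its merge() are illustrative stand-ins, not the real HttpStats/IngestStats API:

    import java.util.List;

    public class StatsMergeDemo {
        record Counts(long requests, long errors) {
            static final Counts IDENTITY = new Counts(0, 0); // neutral element, safe even for zero nodes

            static Counts merge(Counts a, Counts b) {
                return new Counts(a.requests() + b.requests(), a.errors() + b.errors());
            }
        }

        public static void main(String[] args) {
            List<Counts> perNode = List.of(new Counts(10, 1), new Counts(5, 0), new Counts(7, 2));
            Counts total = perNode.stream().reduce(Counts.IDENTITY, Counts::merge);
            System.out.println(total); // Counts[requests=22, errors=3]
        }
    }

diff --git a/server/src/main/java/org/elasticsearch/rest/action/synonyms/RestGetSynonymsAction.java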
b/server/src/main/java/org/elasticsearch/rest/action/synonyms/RestGetSynonymsAction.java new file mode 100644 index 0000000000000..68b5cce6caedc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/synonyms/RestGetSynonymsAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest.action.synonyms; + +import org.elasticsearch.action.synonyms.GetSynonymsAction; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +@ServerlessScope(Scope.PUBLIC) +public class RestGetSynonymsAction extends BaseRestHandler { + + private static final Integer DEFAULT_FROM_PARAM = 0; + private static final Integer DEFAULT_SIZE_PARAM = 10; + + @Override + public String getName() { + return "synonyms_get_action"; + } + + @Override + public List routes() { + return List.of(new Route(GET, "/_synonyms/{synonymsSet}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + GetSynonymsAction.Request request = new GetSynonymsAction.Request( + restRequest.param("synonymsSet"), + restRequest.paramAsInt("from", DEFAULT_FROM_PARAM), + restRequest.paramAsInt("size", DEFAULT_SIZE_PARAM) + ); + return channel -> client.execute(GetSynonymsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index b5154787b70e4..e0ee2f178e4ef 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -11,8 +11,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -129,7 +127,6 @@ final class DefaultSearchContext extends SearchContext { private Profilers profilers; private final Map searchExtBuilders = new HashMap<>(); - private CollectorManager aggCollectorManager; private final SearchExecutionContext searchExecutionContext; private final FetchPhase fetchPhase; @@ -763,16 +760,6 @@ public long getRelativeTimeInMillis() { return relativeTimeSupplier.getAsLong(); } - @Override - public CollectorManager getAggsCollectorManager() { - return aggCollectorManager; - } - - @Override - public void registerAggsCollectorManager(CollectorManager collectorManager) { - this.aggCollectorManager = collectorManager; - } - @Override public SearchExecutionContext getSearchExecutionContext() { return searchExecutionContext; diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index da5af0e6ea6cc..2d5fb6b1c5cb1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -56,9 +56,9 @@ public static void preProcess(SearchContext context) { } if (context.getProfilers() != null) { InternalProfileCollector profileCollector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION); - context.registerAggsCollectorManager(new InternalProfileCollectorManager(profileCollector)); + context.aggregations().registerAggsCollectorManager(new InternalProfileCollectorManager(profileCollector)); } else { - context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); + context.aggregations().registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); } } @@ -110,6 +110,5 @@ public static void execute(SearchContext context) { // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); - context.registerAggsCollectorManager(null); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java index 4a1009fedd9cc..c6ea3d4cfe69d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/SearchContextAggregations.java @@ -7,6 +7,9 @@ */ package org.elasticsearch.search.aggregations; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.CollectorManager; + /** * The aggregation context that is part of the search context. 
*/ @@ -14,6 +17,7 @@ public class SearchContextAggregations { private final AggregatorFactories factories; private Aggregator[] aggregators; + private CollectorManager aggCollectorManager; /** * Creates a new aggregation context with the parsed aggregator factories @@ -38,4 +42,18 @@ public Aggregator[] aggregators() { public void aggregators(Aggregator[] aggregators) { this.aggregators = aggregators; } + + /** + * Registers the collector to be run for the aggregations phase + */ + public void registerAggsCollectorManager(CollectorManager aggCollectorManager) { + this.aggCollectorManager = aggCollectorManager; + } + + /** + * Returns the collector to be run for the aggregations phase + */ + public CollectorManager getAggsCollectorManager() { + return aggCollectorManager; + } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index fda7e2a0d2b43..89845c21439c3 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -8,8 +8,6 @@ package org.elasticsearch.search.internal; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; @@ -440,16 +438,6 @@ public Profilers getProfilers() { return in.getProfilers(); } - @Override - public CollectorManager getAggsCollectorManager() { - return in.getAggsCollectorManager(); - } - - @Override - public void registerAggsCollectorManager(CollectorManager collectorManager) { - in.registerAggsCollectorManager(collectorManager); - } - @Override public SearchExecutionContext getSearchExecutionContext() { return in.getSearchExecutionContext(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 37cca8e829cd5..1e4c9835ff829 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.search.internal; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; @@ -375,16 +373,6 @@ public final boolean hasOnlySuggest() { */ public abstract long getRelativeTimeInMillis(); - /** - * Registers the collector to be run for the aggregations phase - */ - public abstract void registerAggsCollectorManager(CollectorManager collectorManager); - - /** - * Returns the collector to be run for the aggregations phase - */ - public abstract CollectorManager getAggsCollectorManager(); - public abstract SearchExecutionContext getSearchExecutionContext(); @Override diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 50104acda0dd4..a11277a1787b1 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -237,9 +237,9 @@ static void addCollectorsAndSearch(SearchContext searchContext) throws QueryPhas collector ); } - if 
(searchContext.getAggsCollectorManager() != null) { + if (searchContext.aggregations() != null) { final Collector collector = collectorManager.newCollector(); - final Collector aggsCollector = searchContext.getAggsCollectorManager().newCollector(); + final Collector aggsCollector = searchContext.aggregations().getAggsCollectorManager().newCollector(); collectorManager = wrapWithProfilerCollectorManagerIfNeeded( searchContext.getProfilers(), new SingleThreadCollectorManager(MultiCollector.wrap(collector, aggsCollector)), diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java index 037c66f44dabf..f38d89c000380 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankSearchContext.java @@ -8,8 +8,6 @@ package org.elasticsearch.search.rank; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; @@ -204,14 +202,6 @@ public long getRelativeTimeInMillis() { return parent.getRelativeTimeInMillis(); } - /** - * Aggregations are run as a separate query, so do not add any aggregations collectors. - */ - @Override - public CollectorManager getAggsCollectorManager() { - return null; - } - /* ---- ALL METHODS ARE UNSUPPORTED BEYOND HERE ---- */ @Override @@ -544,11 +534,6 @@ public void addFetchResult() { throw new UnsupportedOperationException(); } - @Override - public void registerAggsCollectorManager(CollectorManager collectorManager) { - throw new UnsupportedOperationException(); - } - @Override public SearchExecutionContext getSearchExecutionContext() { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index c2a233645a596..c967d7683f0b2 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -74,7 +74,7 @@ protected Suggest.Suggestion plugins) { * Return all nodes shutting down from the given cluster state */ public static Set shutdownNodes(final ClusterState clusterState) { - return NodesShutdownMetadata.getShutdowns(clusterState) - .map(NodesShutdownMetadata::getAllNodeMetadataMap) - .map(Map::keySet) - .orElse(Collections.emptySet()); + return clusterState.metadata().nodeShutdowns().getAllNodeIds(); } /** * Return all nodes shutting down with the given shutdown types from the given cluster state */ public static Set shutdownTypeNodes(final ClusterState clusterState, final SingleNodeShutdownMetadata.Type... 
shutdownTypes) { - Set types = Arrays.stream(shutdownTypes).collect(Collectors.toSet()); - return NodesShutdownMetadata.getShutdowns(clusterState) - .map(NodesShutdownMetadata::getAllNodeMetadataMap) - .map( - m -> m.entrySet() - .stream() - .filter(e -> types.contains(e.getValue().getType())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) - ) - .map(Map::keySet) - .orElse(Collections.emptySet()); + Set types = Arrays.stream(shutdownTypes).collect(toSet()); + return clusterState.metadata() + .nodeShutdowns() + .getAll() + .entrySet() + .stream() + .filter(e -> types.contains(e.getValue().getType())) + .map(Map.Entry::getKey) + .collect(toSet()); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 9e58c4436d749..dc52a6cd8be37 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -45,6 +45,7 @@ import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.repositories.SnapshotIndexCommit; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -374,7 +375,7 @@ private void snapshot( indexShard.mapperService(), snapshot.getSnapshotId(), indexId, - snapshotRef, + new SnapshotIndexCommit(snapshotRef), getShardStateId(indexShard, snapshotRef.getIndexCommit()), snapshotStatus, version, diff --git a/server/src/main/java/org/elasticsearch/synonyms/SynonymRule.java b/server/src/main/java/org/elasticsearch/synonyms/SynonymRule.java index d0bd15e4d047d..c09861d7db516 100644 --- a/server/src/main/java/org/elasticsearch/synonyms/SynonymRule.java +++ b/server/src/main/java/org/elasticsearch/synonyms/SynonymRule.java @@ -26,7 +26,7 @@ public class SynonymRule implements Writeable, ToXContentObject { public static final ParseField SYNONYMS_FIELD = new ParseField("synonyms"); public static final ParseField ID_FIELD = new ParseField("id"); - public static final String SYNONYMS_SET_FIELD = "synonyms_set"; + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("synonym_rule", args -> { @SuppressWarnings("unchecked") final String id = (String) args[0]; diff --git a/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java b/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java index e3a4efca9ba14..423ffc882eebe 100644 --- a/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java +++ b/server/src/main/java/org/elasticsearch/synonyms/SynonymsManagementAPIService.java @@ -9,6 +9,7 @@ package org.elasticsearch.synonyms; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; @@ -17,20 +18,22 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.action.synonyms.PutSynonymsAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import 
org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; @@ -45,6 +48,7 @@ public class SynonymsManagementAPIService { public static final String SYNONYMS_FEATURE_NAME = "synonyms"; public static final String SYNONYMS_SET_FIELD = "synonyms_set"; public static final String SYNONYMS_FIELD = "synonyms"; + public static final String SYNONYM_RULE_ID_SEPARATOR = "|"; private final Client client; @@ -101,7 +105,49 @@ private static XContentBuilder mappings() { } } - public void putSynonymsset(String resourceName, SynonymsSet synonymsset, ActionListener<PutSynonymsAction.Response> listener) { + public void getSynonymsSet(String resourceName, int from, int size, ActionListener<SynonymsSetResult> listener) { + client.prepareSearch(SYNONYMS_ALIAS_NAME) + .setQuery(QueryBuilders.termQuery(SYNONYMS_SET_FIELD, resourceName)) + .setFrom(from) + .setSize(size) + .execute(listener.delegateFailure((searchResponseListener, searchResponse) -> { + final long totalSynonymRules = searchResponse.getHits().getTotalHits().value; + if (totalSynonymRules == 0) { + listener.onFailure(new ResourceNotFoundException("Synonym set [" + resourceName + "] not found")); + return; + } + final SynonymRule[] synonymRules = Arrays.stream(searchResponse.getHits().getHits()) + .map(SynonymsManagementAPIService::hitToSynonymRule) + .toArray(SynonymRule[]::new); + listener.onResponse(new SynonymsSetResult(totalSynonymRules, synonymRules)); + })); + } + + private static SynonymRule hitToSynonymRule(SearchHit hit) { + return new SynonymRule( + externalSynonymRuleId(hit.getId()), + (String) hit.getSourceAsMap().get(SynonymRule.SYNONYMS_FIELD.getPreferredName()) + ); + } + + private static String externalSynonymRuleId(String internalId) { + int index = internalId.indexOf(SYNONYM_RULE_ID_SEPARATOR); + if (index == -1) { + throw new IllegalStateException("Synonym Rule ID [" + internalId + "] is incorrect"); + } + return internalId.substring(index + 1); + } + + private static String internalSynonymRuleId(String resourceName, SynonymRule synonymRule) { + String synonymRuleId = synonymRule.id(); + if (synonymRuleId == null) { + synonymRuleId = UUIDs.base64UUID(); + } + final String id = resourceName + SYNONYM_RULE_ID_SEPARATOR + synonymRuleId; + return id; + } + + public void putSynonymsSet(String resourceName, SynonymRule[] synonymsSet, ActionListener<UpdateSynonymsResult> listener) { // TODO Add synonym rules validation
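The two helpers above pin down the document-id scheme for synonym rules: the indexed id is the synonyms set name and the rule id joined by the "|" separator, and the external id is recovered by stripping everything up to the first separator. A standalone round-trip sketch; the demo class name is hypothetical, while the two methods mirror internalSynonymRuleId(...) and externalSynonymRuleId(...) from the diff:

    public class SynonymRuleIdDemo {
        static final String SEPARATOR = "|"; // same value as SYNONYM_RULE_ID_SEPARATOR

        // internal (indexed) id: "<synonyms set name>|<rule id>"
        static String internalId(String resourceName, String ruleId) {
            return resourceName + SEPARATOR + ruleId;
        }

        // inverse mapping: strip everything up to and including the first separator
        static String externalId(String internalId) {
            int index = internalId.indexOf(SEPARATOR);
            if (index == -1) {
                throw new IllegalStateException("Synonym Rule ID [" + internalId + "] is incorrect");
            }
            return internalId.substring(index + 1);
        }

        public static void main(String[] args) {
            String internal = internalId("my-synonyms", "rule-1");
            System.out.println(internal);             // my-synonyms|rule-1
            System.out.println(externalId(internal)); // rule-1
        }
    }

@@ -128,7 +174,7 @@ public void putSynonymsset(String resourceName, SynonymsSet synonymsset, ActionL // Insert as bulk requests BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); try { - for (SynonymRule synonymRule : synonymsset.synonyms()) { + for (SynonymRule synonymRule : synonymsSet) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); @@ -140,10 +186,7 @@ public void putSynonymsset(String resourceName, SynonymsSet synonymsset, ActionL final IndexRequest indexRequest = new IndexRequest(SYNONYMS_ALIAS_NAME).opType(DocWriteRequest.OpType.INDEX)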
.source(builder); - final String synonymRuleId = synonymRule.id(); - if (synonymRuleId != null) { - indexRequest.id(synonymRuleId); - } + indexRequest.id(internalSynonymRuleId(resourceName, synonymRule)); bulkRequestBuilder.add(indexRequest); } @@ -155,10 +198,8 @@ public void putSynonymsset(String resourceName, SynonymsSet synonymsset, ActionL bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .execute(deleteByQueryResponseListener.delegateFailure((bulkResponseListener, bulkResponse) -> { if (bulkResponse.hasFailures() == false) { - PutSynonymsAction.Response.Result result = created - ? PutSynonymsAction.Response.Result.CREATED - : PutSynonymsAction.Response.Result.UPDATED; - bulkResponseListener.onResponse(new PutSynonymsAction.Response(result)); + UpdateSynonymsResult result = created ? UpdateSynonymsResult.CREATED : UpdateSynonymsResult.UPDATED; + bulkResponseListener.onResponse(result); } else { bulkResponseListener.onFailure( new ElasticsearchException("Couldn't update synonyms: " + bulkResponse.buildFailureMessage()) @@ -177,4 +218,11 @@ static Settings settings() { .build(); } + public enum UpdateSynonymsResult { + CREATED, + UPDATED + } + + public record SynonymsSetResult(long totalSynonymRules, SynonymRule[] synonymRules) {} + } diff --git a/server/src/main/java/org/elasticsearch/synonyms/SynonymsSet.java b/server/src/main/java/org/elasticsearch/synonyms/SynonymsSet.java deleted file mode 100644 index fac48e60130cf..0000000000000 --- a/server/src/main/java/org/elasticsearch/synonyms/SynonymsSet.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.synonyms; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; - -// TODO review the need for this class, we might use just SynonymRule as this is a just a holder of SynonymRule -public class SynonymsSet implements Writeable, ToXContentObject { - - public static final ParseField SYNONYMS_SET_FIELD = new ParseField("synonyms_set"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("synonyms_set", args -> { - @SuppressWarnings("unchecked") - final List synonyms = (List) args[0]; - return new SynonymsSet(synonyms.toArray(new SynonymRule[synonyms.size()])); - }); - - static { - PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> SynonymRule.fromXContent(p), SYNONYMS_SET_FIELD); - } - - private final SynonymRule[] synonyms; - - public SynonymsSet(SynonymRule[] synonyms) { - Objects.requireNonNull(synonyms, "synonyms cannot be null"); - this.synonyms = synonyms; - } - - public SynonymsSet(StreamInput in) throws IOException { - this.synonyms = in.readArray(SynonymRule::new, SynonymRule[]::new); - } - - public SynonymRule[] synonyms() { - return synonyms; - } - - public static SynonymsSet fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.array(SYNONYMS_SET_FIELD.getPreferredName(), (Object[]) synonyms); - } - builder.endObject(); - - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeArray(synonyms); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SynonymsSet that = (SynonymsSet) o; - return Arrays.equals(synonyms, that.synonyms); - } - - @Override - public int hashCode() { - return Arrays.hashCode(synonyms); - } -} diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java index 5c973f246307f..8301dacb1898a 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java @@ -8,48 +8,32 @@ package org.elasticsearch.threadpool; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.ToXContentFragment; -import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.List; -public class ThreadPoolStats implements Writeable, ToXContentFragment, Iterable { - - 
public static class Stats implements Writeable, ToXContentFragment, Comparable { - - private final String name; - private final int threads; - private final int queue; - private final int active; - private final long rejected; - private final int largest; - private final long completed; - - public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) { - this.name = name; - this.threads = threads; - this.queue = queue; - this.active = active; - this.rejected = rejected; - this.largest = largest; - this.completed = completed; - } +import static java.util.Collections.emptyIterator; +import static org.elasticsearch.common.collect.Iterators.single; + +public record ThreadPoolStats(List stats) implements Writeable, ChunkedToXContent, Iterable { + + public record Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) + implements + Writeable, + ChunkedToXContent, + Comparable { public Stats(StreamInput in) throws IOException { - name = in.readString(); - threads = in.readInt(); - queue = in.readInt(); - active = in.readInt(); - rejected = in.readLong(); - largest = in.readInt(); - completed = in.readLong(); + this(in.readString(), in.readInt(), in.readInt(), in.readInt(), in.readLong(), in.readInt(), in.readLong()); } @Override @@ -63,86 +47,45 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(completed); } - public String getName() { - return this.name; - } - - public int getThreads() { - return this.threads; - } - - public int getQueue() { - return this.queue; - } - - public int getActive() { - return this.active; - } - - public long getRejected() { - return rejected; - } - - public int getLargest() { - return largest; - } - - public long getCompleted() { - return this.completed; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(name); - if (threads != -1) { - builder.field(Fields.THREADS, threads); - } - if (queue != -1) { - builder.field(Fields.QUEUE, queue); - } - if (active != -1) { - builder.field(Fields.ACTIVE, active); - } - if (rejected != -1) { - builder.field(Fields.REJECTED, rejected); - } - if (largest != -1) { - builder.field(Fields.LARGEST, largest); - } - if (completed != -1) { - builder.field(Fields.COMPLETED, completed); - } - builder.endObject(); - return builder; - } - @Override public int compareTo(Stats other) { - if ((getName() == null) && (other.getName() == null)) { + if ((name() == null) && (other.name() == null)) { return 0; - } else if ((getName() != null) && (other.getName() == null)) { + } else if ((name() != null) && (other.name() == null)) { return 1; - } else if (getName() == null) { + } else if (name() == null) { return -1; } else { - int compare = getName().compareTo(other.getName()); + int compare = name().compareTo(other.name()); if (compare == 0) { - compare = Integer.compare(getThreads(), other.getThreads()); + compare = Integer.compare(threads(), other.threads()); } return compare; } } - } - private List stats; + @Override + public Iterator toXContentChunked(ToXContent.Params outerParams) { + return Iterators.concat( + ChunkedToXContentHelper.startObject(name), + threads != -1 ? single((builder, params) -> builder.field(Fields.THREADS, threads)) : emptyIterator(), + queue != -1 ? single((builder, params) -> builder.field(Fields.QUEUE, queue)) : emptyIterator(), + active != -1 ? 
single((builder, params) -> builder.field(Fields.ACTIVE, active)) : emptyIterator(), + rejected != -1 ? single((builder, params) -> builder.field(Fields.REJECTED, rejected)) : emptyIterator(), + largest != -1 ? single((builder, params) -> builder.field(Fields.LARGEST, largest)) : emptyIterator(), + completed != -1 ? single((builder, params) -> builder.field(Fields.COMPLETED, completed)) : emptyIterator(), + ChunkedToXContentHelper.endObject() + ); + } + } - public ThreadPoolStats(List stats) { + public ThreadPoolStats { Collections.sort(stats); - this.stats = stats; + stats = Collections.unmodifiableList(stats); } public ThreadPoolStats(StreamInput in) throws IOException { - stats = in.readList(Stats::new); + this(in.readList(Stats::new)); } @Override @@ -166,12 +109,11 @@ static final class Fields { } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(Fields.THREAD_POOL); - for (Stats stat : stats) { - stat.toXContent(builder, params); - } - builder.endObject(); - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + ChunkedToXContentHelper.startObject(Fields.THREAD_POOL), + Iterators.flatMap(stats.iterator(), s -> s.toXContentChunked(params)), + ChunkedToXContentHelper.endObject() + ); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 4ef1cff90c040..d123cb0fb5e35 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -69,10 +69,10 @@ import org.elasticsearch.script.ScriptStats; import org.elasticsearch.script.TimeSeries; import org.elasticsearch.search.suggest.completion.CompletionStats; -import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPoolStats; +import org.elasticsearch.threadpool.ThreadPoolStatsTests; import org.elasticsearch.transport.TransportActionStats; import org.elasticsearch.transport.TransportStats; import org.elasticsearch.xcontent.ToXContent; @@ -83,13 +83,13 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.stream.IntStream; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.test.AbstractChunkedSerializingTestCase.assertChunkCount; public class NodeStatsTests extends ESTestCase { public void testSerialization() throws IOException { @@ -231,20 +231,10 @@ public void testSerialization() throws IOException { if (nodeStats.getThreadPool() == null) { assertNull(deserializedNodeStats.getThreadPool()); } else { - Iterator threadPoolIterator = nodeStats.getThreadPool().iterator(); - Iterator deserializedThreadPoolIterator = deserializedNodeStats.getThreadPool().iterator(); - while (threadPoolIterator.hasNext()) { - ThreadPoolStats.Stats stats = threadPoolIterator.next(); - ThreadPoolStats.Stats deserializedStats = deserializedThreadPoolIterator.next(); - assertEquals(stats.getName(), deserializedStats.getName()); - assertEquals(stats.getThreads(), deserializedStats.getThreads()); - 
assertEquals(stats.getActive(), deserializedStats.getActive()); - assertEquals(stats.getLargest(), deserializedStats.getLargest()); - assertEquals(stats.getCompleted(), deserializedStats.getCompleted()); - assertEquals(stats.getQueue(), deserializedStats.getQueue()); - assertEquals(stats.getRejected(), deserializedStats.getRejected()); - } + assertNotSame(nodeStats.getThreadPool(), deserializedNodeStats.getThreadPool()); + assertEquals(nodeStats.getThreadPool(), deserializedNodeStats.getThreadPool()); } + FsInfo fs = nodeStats.getFs(); FsInfo deserializedFs = deserializedNodeStats.getFs(); if (fs == null) { @@ -467,34 +457,8 @@ public void testSerialization() throws IOException { if (ingestStats == null) { assertNull(deserializedIngestStats); } else { - IngestStats.Stats totalStats = ingestStats.totalStats(); - assertEquals(totalStats.ingestCount(), deserializedIngestStats.totalStats().ingestCount()); - assertEquals(totalStats.ingestCurrent(), deserializedIngestStats.totalStats().ingestCurrent()); - assertEquals(totalStats.ingestFailedCount(), deserializedIngestStats.totalStats().ingestFailedCount()); - assertEquals(totalStats.ingestTimeInMillis(), deserializedIngestStats.totalStats().ingestTimeInMillis()); - assertEquals(ingestStats.pipelineStats().size(), deserializedIngestStats.pipelineStats().size()); - for (IngestStats.PipelineStat pipelineStat : ingestStats.pipelineStats()) { - String pipelineId = pipelineStat.pipelineId(); - IngestStats.Stats deserializedPipelineStats = getPipelineStats(deserializedIngestStats.pipelineStats(), pipelineId); - assertEquals(pipelineStat.stats().ingestFailedCount(), deserializedPipelineStats.ingestFailedCount()); - assertEquals(pipelineStat.stats().ingestTimeInMillis(), deserializedPipelineStats.ingestTimeInMillis()); - assertEquals(pipelineStat.stats().ingestCurrent(), deserializedPipelineStats.ingestCurrent()); - assertEquals(pipelineStat.stats().ingestCount(), deserializedPipelineStats.ingestCount()); - List processorStats = ingestStats.processorStats().get(pipelineId); - // intentionally validating identical order - Iterator it = deserializedIngestStats.processorStats().get(pipelineId).iterator(); - for (IngestStats.ProcessorStat processorStat : processorStats) { - IngestStats.ProcessorStat deserializedProcessorStat = it.next(); - assertEquals(processorStat.stats().ingestFailedCount(), deserializedProcessorStat.stats().ingestFailedCount()); - assertEquals( - processorStat.stats().ingestTimeInMillis(), - deserializedProcessorStat.stats().ingestTimeInMillis() - ); - assertEquals(processorStat.stats().ingestCurrent(), deserializedProcessorStat.stats().ingestCurrent()); - assertEquals(processorStat.stats().ingestCount(), deserializedProcessorStat.stats().ingestCount()); - } - assertFalse(it.hasNext()); - } + assertNotSame(ingestStats, deserializedIngestStats); + assertEquals(ingestStats, deserializedIngestStats); } AdaptiveSelectionStats adaptiveStats = nodeStats.getAdaptiveSelectionStats(); AdaptiveSelectionStats deserializedAdaptiveStats = deserializedNodeStats.getAdaptiveSelectionStats(); @@ -545,17 +509,17 @@ public void testSerialization() throws IOException { } public void testChunking() { - AbstractChunkedSerializingTestCase.assertChunkCount( + assertChunkCount( createNodeStats(), randomFrom(ToXContent.EMPTY_PARAMS, new ToXContent.MapParams(Map.of("level", "node"))), nodeStats -> expectedChunks(nodeStats, NodeStatsLevel.NODE) ); - AbstractChunkedSerializingTestCase.assertChunkCount( + assertChunkCount( createNodeStats(), new 
ToXContent.MapParams(Map.of("level", "indices")), nodeStats -> expectedChunks(nodeStats, NodeStatsLevel.INDICES) ); - AbstractChunkedSerializingTestCase.assertChunkCount( + assertChunkCount( createNodeStats(), new ToXContent.MapParams(Map.of("level", "shards")), nodeStats -> expectedChunks(nodeStats, NodeStatsLevel.SHARDS) @@ -563,9 +527,25 @@ public void testChunking() { } private static int expectedChunks(NodeStats nodeStats, NodeStatsLevel level) { - return 4 + expectedChunks(nodeStats.getHttp()) + expectedChunks(nodeStats.getIndices(), level) + expectedChunks( - nodeStats.getTransport() - ) + expectedChunks(nodeStats.getIngestStats()); + return 5 // one per each chunkeable object + + expectedChunks(nodeStats.getHttp()) // + + expectedChunks(nodeStats.getIndices(), level) // + + expectedChunks(nodeStats.getTransport()) // + + expectedChunks(nodeStats.getIngestStats()) // + + expectedChunks(nodeStats.getThreadPool()); + } + + private static int expectedChunks(ThreadPoolStats threadPool) { + return threadPool == null ? 0 : 2 + threadPool.stats().stream().mapToInt(s -> { + var chunks = 0; + chunks += s.threads() == -1 ? 0 : 1; + chunks += s.queue() == -1 ? 0 : 1; + chunks += s.active() == -1 ? 0 : 1; + chunks += s.rejected() == -1 ? 0 : 1; + chunks += s.largest() == -1 ? 0 : 1; + chunks += s.completed() == -1 ? 0 : 1; + return 2 + chunks; // start + endObject + chunks + }).sum(); } private static int expectedChunks(@Nullable IngestStats ingestStats) { @@ -815,20 +795,10 @@ public static NodeStats createNodeStats() { } ThreadPoolStats threadPoolStats = null; if (frequently()) { - int numThreadPoolStats = randomIntBetween(0, 10); - List threadPoolStatsList = new ArrayList<>(); + var numThreadPoolStats = randomIntBetween(0, 10); + var threadPoolStatsList = new ArrayList(); for (int i = 0; i < numThreadPoolStats; i++) { - threadPoolStatsList.add( - new ThreadPoolStats.Stats( - randomAlphaOfLengthBetween(3, 10), - randomIntBetween(1, 1000), - randomIntBetween(1, 1000), - randomIntBetween(1, 1000), - randomNonNegativeLong(), - randomIntBetween(1, 1000), - randomIntBetween(1, 1000) - ) - ); + threadPoolStatsList.add(ThreadPoolStatsTests.randomStats(randomAlphaOfLengthBetween(3, 10))); } threadPoolStats = new ThreadPoolStats(threadPoolStatsList); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index cbd4a10bda3d2..f0f2e8c174ac6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.action.admin.cluster.node.tasks; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -40,6 +41,7 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.ReachabilityChecker; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; @@ -55,9 +57,12 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CountDownLatch; 
+import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.stream.Collectors; import static org.elasticsearch.action.support.PlainActionFuture.newFuture; @@ -68,6 +73,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class TransportTasksActionTests extends TaskManagerTestCase { @@ -674,6 +680,152 @@ protected void taskOperation( assertEquals(0, responses.failureCount()); } + public void testTaskResponsesDiscardedOnCancellation() throws Exception { + setupTestNodes(Settings.EMPTY); + connectNodes(testNodes); + CountDownLatch blockedActionLatch = new CountDownLatch(1); + ActionFuture future = startBlockingTestNodesAction(blockedActionLatch); + + final var taskResponseListeners = new LinkedBlockingQueue>(); + final var taskResponseListenersCountDown = new CountDownLatch(2); // test action plus the list[n] action + + final TestTasksAction tasksAction = new TestTasksAction( + "internal:testTasksAction", + testNodes[0].clusterService, + testNodes[0].transportService + ) { + @Override + protected void taskOperation( + CancellableTask actionTask, + TestTasksRequest request, + Task task, + ActionListener listener + ) { + taskResponseListeners.add(listener); + taskResponseListenersCountDown.countDown(); + } + }; + + TestTasksRequest testTasksRequest = new TestTasksRequest(); + testTasksRequest.setNodes(testNodes[0].getNodeId()); // only local node + PlainActionFuture taskFuture = newFuture(); + CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager() + .registerAndExecute( + "direct", + tasksAction, + testTasksRequest, + testNodes[0].transportService.getLocalNodeConnection(), + taskFuture + ); + safeAwait(taskResponseListenersCountDown); + + final var reachabilityChecker = new ReachabilityChecker(); + + final var listener0 = Objects.requireNonNull(taskResponseListeners.poll()); + if (randomBoolean()) { + listener0.onResponse(reachabilityChecker.register(new TestTaskResponse("status"))); + } else { + listener0.onFailure(reachabilityChecker.register(new ElasticsearchException("simulated"))); + } + reachabilityChecker.checkReachable(); + + PlainActionFuture.get( + fut -> testNodes[0].transportService.getTaskManager().cancelTaskAndDescendants(task, "test", false, fut), + 10, + TimeUnit.SECONDS + ); + + reachabilityChecker.ensureUnreachable(); + + while (true) { + final var listener = taskResponseListeners.poll(); + if (listener == null) { + break; + } + if (randomBoolean()) { + listener.onResponse(reachabilityChecker.register(new TestTaskResponse("status"))); + } else { + listener.onFailure(reachabilityChecker.register(new ElasticsearchException("simulated"))); + } + reachabilityChecker.ensureUnreachable(); + } + + expectThrows(TaskCancelledException.class, taskFuture::actionGet); + + blockedActionLatch.countDown(); + NodesResponse responses = future.get(10, TimeUnit.SECONDS); + assertEquals(0, responses.failureCount()); + } + + public void testNodeResponsesDiscardedOnCancellation() { + setupTestNodes(Settings.EMPTY); + connectNodes(testNodes); + + final var taskResponseListeners = new AtomicReferenceArray>(testNodes.length); + final var taskResponseListenersCountDown = new CountDownLatch(testNodes.length); // one list[n] action per node + final var tasksActions = new 
TestTasksAction[testNodes.length]; + for (int i = 0; i < testNodes.length; i++) { + final var nodeIndex = i; + tasksActions[i] = new TestTasksAction("internal:testTasksAction", testNodes[i].clusterService, testNodes[i].transportService) { + @Override + protected void taskOperation( + CancellableTask actionTask, + TestTasksRequest request, + Task task, + ActionListener listener + ) { + assertThat(taskResponseListeners.getAndSet(nodeIndex, ActionListener.notifyOnce(listener)), nullValue()); + taskResponseListenersCountDown.countDown(); + } + }; + } + + TestTasksRequest testTasksRequest = new TestTasksRequest(); + testTasksRequest.setActions("internal:testTasksAction[n]"); + PlainActionFuture taskFuture = newFuture(); + CancellableTask task = (CancellableTask) testNodes[0].transportService.getTaskManager() + .registerAndExecute( + "direct", + tasksActions[0], + testTasksRequest, + testNodes[0].transportService.getLocalNodeConnection(), + taskFuture + ); + safeAwait(taskResponseListenersCountDown); + + final var reachabilityChecker = new ReachabilityChecker(); + + if (randomBoolean()) { + // local node does not de/serialize node-level response so retains references to the task-level response + if (randomBoolean()) { + taskResponseListeners.get(0).onResponse(reachabilityChecker.register(new TestTaskResponse("status"))); + } else { + taskResponseListeners.get(0).onFailure(reachabilityChecker.register(new ElasticsearchException("simulated"))); + } + reachabilityChecker.checkReachable(); + } + + PlainActionFuture.get( + fut -> testNodes[0].transportService.getTaskManager().cancelTaskAndDescendants(task, "test", false, fut), + 10, + TimeUnit.SECONDS + ); + + reachabilityChecker.ensureUnreachable(); + assertFalse(taskFuture.isDone()); + + for (int i = 0; i < testNodes.length; i++) { + if (randomBoolean()) { + taskResponseListeners.get(i).onResponse(reachabilityChecker.register(new TestTaskResponse("status"))); + } else { + taskResponseListeners.get(i).onFailure(reachabilityChecker.register(new ElasticsearchException("simulated"))); + } + reachabilityChecker.ensureUnreachable(); + } + + expectThrows(TaskCancelledException.class, taskFuture::actionGet); + } + public void testTaskLevelActionFailures() throws Exception { setupTestNodes(Settings.EMPTY); connectNodes(testNodes); diff --git a/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java b/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java new file mode 100644 index 0000000000000..db48b09e95a08 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelHelper; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ReachabilityChecker; +import org.hamcrest.Matchers; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +public class CancellableFanOutTests extends ESTestCase { + + public void testFanOutWithoutCancellation() { + final var task = randomFrom( + new Task(1, "test", "test", "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "test", "test", "", TaskId.EMPTY_TASK_ID, Map.of()), + null + ); + final var future = new PlainActionFuture(); + + final var itemListeners = new HashMap>(); + final var finalFailure = randomBoolean(); + + new CancellableFanOut() { + int counter; + + @Override + protected void sendItemRequest(String item, ActionListener listener) { + itemListeners.put(item, listener); + } + + @Override + protected void onItemResponse(String item, String itemResponse) { + assertThat(item, Matchers.oneOf("a", "c")); + assertEquals(item + "-response", itemResponse); + counter += 1; + } + + @Override + protected void onItemFailure(String item, Exception e) { + assertEquals("b", item); + counter += 1; + } + + @Override + protected String onCompletion() { + assertEquals(3, counter); + if (finalFailure) { + throw new ElasticsearchException("failed"); + } else { + return "completed"; + } + } + }.run(task, List.of("a", "b", "c").iterator(), future); + + itemListeners.remove("a").onResponse("a-response"); + assertFalse(future.isDone()); + itemListeners.remove("b").onFailure(new ElasticsearchException("b-response")); + assertFalse(future.isDone()); + itemListeners.remove("c").onResponse("c-response"); + assertTrue(future.isDone()); + if (finalFailure) { + assertEquals("failed", expectThrows(ElasticsearchException.class, future::actionGet).getMessage()); + } else { + assertEquals("completed", future.actionGet()); + } + } + + public void testReleaseOnCancellation() { + final var task = new CancellableTask(1, "test", "test", "", TaskId.EMPTY_TASK_ID, Map.of()); + final var future = new PlainActionFuture(); + + final var itemListeners = new HashMap>(); + final var handledItemResponse = new AtomicBoolean(); + + final var reachabilityChecker = new ReachabilityChecker(); + reachabilityChecker.register(new CancellableFanOut() { + @Override + protected void sendItemRequest(String item, ActionListener listener) { + itemListeners.put(item, listener); + } + + @Override + protected void onItemResponse(String item, String itemResponse) { + assertEquals("a", item); + assertEquals("a-response", itemResponse); + assertTrue(handledItemResponse.compareAndSet(false, true)); + } + + @Override + protected void onItemFailure(String item, Exception e) { + fail(item); + } + + @Override + protected String onCompletion() { + throw new AssertionError("onCompletion"); + } + }).run(task, List.of("a", "b", "c").iterator(), future); + + itemListeners.remove("a").onResponse("a-response"); + assertTrue(handledItemResponse.get()); + reachabilityChecker.checkReachable(); + + TaskCancelHelper.cancel(task, "test"); + reachabilityChecker.ensureUnreachable(); // even though we're still holding on to some 
item listeners. + assertFalse(future.isDone()); + + itemListeners.remove("b").onResponse("b-response"); + assertFalse(future.isDone()); + + itemListeners.remove("c").onFailure(new ElasticsearchException("c-response")); + assertTrue(itemListeners.isEmpty()); + assertTrue(future.isDone()); + expectThrows(TaskCancelledException.class, future::actionGet); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/GetSynonymsActionRequestSerializingTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/GetSynonymsActionRequestSerializingTests.java new file mode 100644 index 0000000000000..423f0d515fd0d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/synonyms/GetSynonymsActionRequestSerializingTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.synonyms; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class GetSynonymsActionRequestSerializingTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return GetSynonymsAction.Request::new; + } + + @Override + protected GetSynonymsAction.Request createTestInstance() { + return new GetSynonymsAction.Request( + randomIdentifier(), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE) + ); + } + + @Override + protected GetSynonymsAction.Request mutateInstance(GetSynonymsAction.Request instance) throws IOException { + return randomValueOtherThan(instance, this::createTestInstance); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/GetSynonymsActionResponseSerializingTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/GetSynonymsActionResponseSerializingTests.java new file mode 100644 index 0000000000000..5394da0061f40 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/synonyms/GetSynonymsActionResponseSerializingTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.action.synonyms;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.synonyms.SynonymsManagementAPIService;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.synonyms.SynonymsTestUtils.randomSynonymsSet;
+
+public class GetSynonymsActionResponseSerializingTests extends AbstractWireSerializingTestCase<GetSynonymsAction.Response> {
+
+    @Override
+    protected Writeable.Reader<GetSynonymsAction.Response> instanceReader() {
+        return GetSynonymsAction.Response::new;
+    }
+
+    @Override
+    protected GetSynonymsAction.Response createTestInstance() {
+        return new GetSynonymsAction.Response(
+            new SynonymsManagementAPIService.SynonymsSetResult(randomLongBetween(0, Long.MAX_VALUE), randomSynonymsSet())
+        );
+    }
+
+    @Override
+    protected GetSynonymsAction.Response mutateInstance(GetSynonymsAction.Response instance) throws IOException {
+        return randomValueOtherThan(instance, this::createTestInstance);
+    }
+}
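All four serializing test classes in this area lean on the same harness, so it is worth spelling the property out once. A minimal sketch of a wire round-trip, assuming only that AbstractWireSerializingTestCase checks write-then-read equality; the helper below is ours, not the harness's actual internals.

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

import static org.junit.Assert.assertEquals;

final class WireRoundTrip {
    // Serialize an instance, read it back through the Writeable.Reader under test,
    // and require that equals()/hashCode() survive the trip.
    static <T extends Writeable> void assertRoundTrip(T original, Writeable.Reader<T> reader) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                assertEquals(original, reader.read(in));
            }
        }
    }
}

This is why each test only supplies instanceReader(), createTestInstance(), and mutateInstance(): the round-trip and equality machinery lives in the shared base class.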
diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionRequestSerializingTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionRequestSerializingTests.java
index 78bdd6448fae1..6f6790fecf5f4 100644
--- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionRequestSerializingTests.java
+++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionRequestSerializingTests.java
@@ -9,26 +9,13 @@
 package org.elasticsearch.action.synonyms;
 
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.synonyms.SynonymRule;
-import org.elasticsearch.synonyms.SynonymsSet;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
 
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.stream.Collectors;
 
-public class PutSynonymsActionRequestSerializingTests extends AbstractWireSerializingTestCase<PutSynonymsAction.Request> {
-
-    private static SynonymsSet randomSynonymsset() {
-        return new SynonymsSet(randomArray(10, SynonymRule[]::new, PutSynonymsActionRequestSerializingTests::randomSynonymRule));
-    }
+import static org.elasticsearch.action.synonyms.SynonymsTestUtils.randomSynonymsSet;
 
-    private static SynonymRule randomSynonymRule() {
-        return new SynonymRule(
-            randomBoolean() ? null : randomIdentifier(),
-            Arrays.stream(randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(1, 10))).collect(Collectors.joining(", "))
-        );
-    }
+public class PutSynonymsActionRequestSerializingTests extends AbstractWireSerializingTestCase<PutSynonymsAction.Request> {
 
@@ -37,7 +24,7 @@ protected Writeable.Reader<PutSynonymsAction.Request> instanceReader() {
 
     @Override
     protected PutSynonymsAction.Request createTestInstance() {
-        return new PutSynonymsAction.Request(randomIdentifier(), randomSynonymsset());
+        return new PutSynonymsAction.Request(randomIdentifier(), randomSynonymsSet());
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionResponseSerializingTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionResponseSerializingTests.java
index bba8f3e40d752..36d6aa3112cbd 100644
--- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionResponseSerializingTests.java
+++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionResponseSerializingTests.java
@@ -13,8 +13,8 @@
 
 import java.io.IOException;
 
-import static org.elasticsearch.action.synonyms.PutSynonymsAction.Response.Result.CREATED;
-import static org.elasticsearch.action.synonyms.PutSynonymsAction.Response.Result.UPDATED;
+import static org.elasticsearch.synonyms.SynonymsManagementAPIService.UpdateSynonymsResult.CREATED;
+import static org.elasticsearch.synonyms.SynonymsManagementAPIService.UpdateSynonymsResult.UPDATED;
 
 public class PutSynonymsActionResponseSerializingTests extends AbstractWireSerializingTestCase<PutSynonymsAction.Response> {
 
diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java b/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java
new file mode 100644
index 0000000000000..500ec1b67d399
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/synonyms/SynonymsTestUtils.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.synonyms;
+
+import org.elasticsearch.synonyms.SynonymRule;
+
+import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween;
+import static org.elasticsearch.test.ESTestCase.randomArray;
+import static org.elasticsearch.test.ESTestCase.randomBoolean;
+import static org.elasticsearch.test.ESTestCase.randomIdentifier;
+
+class SynonymsTestUtils {
+
+    private SynonymsTestUtils() {
+        throw new UnsupportedOperationException();
+    }
+
+    static SynonymRule[] randomSynonymsSet() {
+        return randomArray(10, SynonymRule[]::new, SynonymsTestUtils::randomSynonymRule);
+    }
+
+    static SynonymRule randomSynonymRule() {
+        return new SynonymRule(
+            randomBoolean() ?
null : randomIdentifier(), + String.join(", ", randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(1, 10))) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index d94794cb7048d..1016d3d99b3b3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -2140,13 +2140,69 @@ public void testDataStreamsUsingTemplates() throws Exception { containsString("unable to remove composable templates [logs, logs2] as they are in use by a data streams [logs-mysql-default]") ); - assertThat(MetadataIndexTemplateService.dataStreamsUsingTemplates(state, Set.of("logs")), equalTo(Set.of())); - assertThat(MetadataIndexTemplateService.dataStreamsUsingTemplates(state, Set.of("logs2")), equalTo(Set.of("logs-mysql-default"))); + assertThat(MetadataIndexTemplateService.dataStreamsExclusivelyUsingTemplates(state, Set.of("logs")), equalTo(Set.of())); + assertThat(MetadataIndexTemplateService.findV2Template(state.metadata(), "logs-mysql-default", false), equalTo("logs2")); // The unreferenced template can be removed without an exception MetadataIndexTemplateService.innerRemoveIndexTemplateV2(stateWithTwoTemplates, "logs"); } + public void testRemovingHigherOrderTemplateOfDataStreamWithMultipleTemplates() throws Exception { + ClusterState state = ClusterState.EMPTY_STATE; + final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); + + ComposableIndexTemplate template = new ComposableIndexTemplate( + Collections.singletonList("logs-*"), + null, + null, + 100L, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate(), + null + ); + + state = service.addIndexTemplateV2(state, false, "logs", template); + + ClusterState stateWithDS = ClusterState.builder(state) + .metadata( + Metadata.builder(state.metadata()) + .put( + DataStreamTestHelper.newInstance( + "logs-mysql-default", + Collections.singletonList(new Index(".ds-logs-mysql-default-000001", "uuid")) + ) + ) + .put( + IndexMetadata.builder(".ds-logs-mysql-default-000001") + .settings(indexSettings(Version.CURRENT, 1, 0).put(IndexMetadata.SETTING_INDEX_UUID, "uuid")) + ) + ) + .build(); + + ComposableIndexTemplate fineGrainedLogsTemplate = new ComposableIndexTemplate( + Collections.singletonList("logs-mysql-*"), + null, + null, + 200L, // Higher priority + null, + null, + new ComposableIndexTemplate.DataStreamTemplate(), + null + ); + + state = service.addIndexTemplateV2(stateWithDS, false, "logs-test", fineGrainedLogsTemplate); + + // Verify that the data stream now matches to the higher order template + assertThat(MetadataIndexTemplateService.dataStreamsExclusivelyUsingTemplates(state, Set.of("logs")), equalTo(Set.of())); + assertThat(MetadataIndexTemplateService.findV2Template(state.metadata(), "logs-mysql-default", false), equalTo("logs-test")); + + // Test removing the higher order template + state = MetadataIndexTemplateService.innerRemoveIndexTemplateV2(state, "logs-test"); + + assertThat(MetadataIndexTemplateService.findV2Template(state.metadata(), "logs-mysql-default", false), equalTo("logs")); + } + public void testV2TemplateOverlaps() throws Exception { { ComposableIndexTemplate template = new ComposableIndexTemplate( diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java index 9f0b26393ce54..90e243ada19a3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java @@ -46,8 +46,8 @@ public void testInsertNewNodeShutdownMetadata() { nodesShutdownMetadata = nodesShutdownMetadata.putSingleNodeMetadata(newNodeMetadata); - assertThat(nodesShutdownMetadata.getAllNodeMetadataMap().get(newNodeMetadata.getNodeId()), equalTo(newNodeMetadata)); - assertThat(nodesShutdownMetadata.getAllNodeMetadataMap().values(), contains(newNodeMetadata)); + assertThat(nodesShutdownMetadata.get(newNodeMetadata.getNodeId()), equalTo(newNodeMetadata)); + assertThat(nodesShutdownMetadata.getAll().values(), contains(newNodeMetadata)); } public void testRemoveShutdownMetadata() { @@ -61,9 +61,9 @@ public void testRemoveShutdownMetadata() { SingleNodeShutdownMetadata nodeToRemove = randomFrom(nodes); nodesShutdownMetadata = nodesShutdownMetadata.removeSingleNodeMetadata(nodeToRemove.getNodeId()); - assertThat(nodesShutdownMetadata.getAllNodeMetadataMap().get(nodeToRemove.getNodeId()), nullValue()); - assertThat(nodesShutdownMetadata.getAllNodeMetadataMap().values(), hasSize(nodes.size() - 1)); - assertThat(nodesShutdownMetadata.getAllNodeMetadataMap().values(), not(hasItem(nodeToRemove))); + assertThat(nodesShutdownMetadata.get(nodeToRemove.getNodeId()), nullValue()); + assertThat(nodesShutdownMetadata.getAll().values(), hasSize(nodes.size() - 1)); + assertThat(nodesShutdownMetadata.getAll().values(), not(hasItem(nodeToRemove))); } public void testIsNodeShuttingDown() { @@ -101,8 +101,8 @@ public void testIsNodeShuttingDown() { .nodes(DiscoveryNodes.builder(state.nodes()).add(TestDiscoveryNode.create("_node_1")).build()) .build(); - assertThat(NodesShutdownMetadata.isNodeShuttingDown(state, "this_node"), equalTo(true)); - assertThat(NodesShutdownMetadata.isNodeShuttingDown(state, "_node_1"), equalTo(false)); + assertThat(state.metadata().nodeShutdowns().contains("this_node"), equalTo(true)); + assertThat(state.metadata().nodeShutdowns().contains("_node_1"), equalTo(false)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 78712b904d85a..7bfd65c2f16c7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -40,10 +41,8 @@ import java.nio.ByteBuffer; import java.time.Instant; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -615,32 +614,41 @@ public void testFailedShard() { /** * Verifies that delayed allocation calculation are 
correct when there are no registered node shutdowns. */ - public void testRemainingDelayCalculationWithNoShutdowns() throws Exception { - checkRemainingDelayCalculation("bogusNodeId", TimeValue.timeValueNanos(10), Map.of(), TimeValue.timeValueNanos(10), false); + public void testRemainingDelayCalculationWithNoShutdowns() { + checkRemainingDelayCalculation( + "bogusNodeId", + TimeValue.timeValueNanos(10), + NodesShutdownMetadata.EMPTY, + TimeValue.timeValueNanos(10), + false + ); } /** * Verifies that delayed allocation calculations are correct when there are registered node shutdowns for nodes which are not relevant * to the shard currently being evaluated. */ - public void testRemainingDelayCalculationsWithUnrelatedShutdowns() throws Exception { + public void testRemainingDelayCalculationsWithUnrelatedShutdowns() { String lastNodeId = "bogusNodeId"; - Map shutdowns = new HashMap<>(); + NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY; int numberOfShutdowns = randomIntBetween(1, 15); for (int i = 0; i <= numberOfShutdowns; i++) { final SingleNodeShutdownMetadata.Type type = randomFrom(EnumSet.allOf(SingleNodeShutdownMetadata.Type.class)); final String targetNodeName = type == SingleNodeShutdownMetadata.Type.REPLACE ? randomAlphaOfLengthBetween(10, 20) : null; - SingleNodeShutdownMetadata shutdown = SingleNodeShutdownMetadata.builder() - .setNodeId(randomValueOtherThan(lastNodeId, () -> randomAlphaOfLengthBetween(5, 10))) - .setReason(this.getTestName()) - .setStartedAtMillis(randomNonNegativeLong()) - .setType(type) - .setTargetNodeName(targetNodeName) - .setGracePeriod( - type == SingleNodeShutdownMetadata.Type.SIGTERM ? TimeValue.parseTimeValue(randomTimeValue(), this.getTestName()) : null - ) - .build(); - shutdowns.put(shutdown.getNodeId(), shutdown); + shutdowns = shutdowns.putSingleNodeMetadata( + SingleNodeShutdownMetadata.builder() + .setNodeId(randomValueOtherThan(lastNodeId, () -> randomAlphaOfLengthBetween(5, 10))) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .setType(type) + .setTargetNodeName(targetNodeName) + .setGracePeriod( + type == SingleNodeShutdownMetadata.Type.SIGTERM + ? TimeValue.parseTimeValue(randomTimeValue(), this.getTestName()) + : null + ) + .build() + ); } checkRemainingDelayCalculation(lastNodeId, TimeValue.timeValueNanos(10), shutdowns, TimeValue.timeValueNanos(10), false); } @@ -648,23 +656,25 @@ public void testRemainingDelayCalculationsWithUnrelatedShutdowns() throws Except /** * Verifies that delay calculation is not impacted when the node the shard was last assigned to was registered for removal. */ - public void testRemainingDelayCalculationWhenNodeIsShuttingDownForRemoval() throws Exception { + public void testRemainingDelayCalculationWhenNodeIsShuttingDownForRemoval() { for (SingleNodeShutdownMetadata.Type type : List.of( SingleNodeShutdownMetadata.Type.REMOVE, SingleNodeShutdownMetadata.Type.SIGTERM )) { String lastNodeId = "bogusNodeId"; - Map shutdowns = new HashMap<>(); - SingleNodeShutdownMetadata shutdown = SingleNodeShutdownMetadata.builder() - .setNodeId(lastNodeId) - .setReason(this.getTestName()) - .setStartedAtMillis(randomNonNegativeLong()) - .setType(type) - .setGracePeriod( - type == SingleNodeShutdownMetadata.Type.SIGTERM ? 
TimeValue.parseTimeValue(randomTimeValue(), this.getTestName()) : null - ) - .build(); - shutdowns.put(shutdown.getNodeId(), shutdown); + NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata( + SingleNodeShutdownMetadata.builder() + .setNodeId(lastNodeId) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .setType(type) + .setGracePeriod( + type == SingleNodeShutdownMetadata.Type.SIGTERM + ? TimeValue.parseTimeValue(randomTimeValue(), this.getTestName()) + : null + ) + .build() + ); checkRemainingDelayCalculation(lastNodeId, TimeValue.timeValueNanos(10), shutdowns, TimeValue.timeValueNanos(10), false); } @@ -674,17 +684,17 @@ public void testRemainingDelayCalculationWhenNodeIsShuttingDownForRemoval() thro * Verifies that the delay calculation uses the configured delay value for nodes known to be restarting, because they are registered for * a `RESTART`-type shutdown, rather than the default global delay. */ - public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithCustomDelay() throws Exception { + public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithCustomDelay() { String lastNodeId = "bogusNodeId"; - Map shutdowns = new HashMap<>(); - SingleNodeShutdownMetadata shutdown = SingleNodeShutdownMetadata.builder() - .setNodeId(lastNodeId) - .setReason(this.getTestName()) - .setStartedAtMillis(randomNonNegativeLong()) - .setType(SingleNodeShutdownMetadata.Type.RESTART) - .setAllocationDelay(TimeValue.timeValueMinutes(1)) - .build(); - shutdowns.put(shutdown.getNodeId(), shutdown); + NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata( + SingleNodeShutdownMetadata.builder() + .setNodeId(lastNodeId) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .setType(SingleNodeShutdownMetadata.Type.RESTART) + .setAllocationDelay(TimeValue.timeValueMinutes(1)) + .build() + ); // Use a different index-level delay so this test will fail if that one gets used instead of the one from the shutdown metadata checkRemainingDelayCalculation(lastNodeId, TimeValue.timeValueNanos(10), shutdowns, TimeValue.timeValueMinutes(1), true); @@ -694,18 +704,18 @@ public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithCustom * Verifies that the delay calculation uses the default delay value for nodes known to be restarting, because they are registered for * a `RESTART`-type shutdown, rather than the default global delay. */ - public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithDefaultDelay() throws Exception { + public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithDefaultDelay() { String lastNodeId = "bogusNodeId"; - Map shutdowns = new HashMap<>(); // Note that we do not explicitly configure the reallocation delay here. 
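The shape this refactor converges on is worth showing once outside the diff. The node id and reason below are illustrative values, not taken from the change; the API calls are the ones the new test code makes.

import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;

final class ShutdownMetadataSketch {
    // NodesShutdownMetadata is immutable: putSingleNodeMetadata returns a new container,
    // so state is threaded through reassignment instead of mutating a bare Map in place.
    static NodesShutdownMetadata oneRestartingNode() {
        return NodesShutdownMetadata.EMPTY.putSingleNodeMetadata(
            SingleNodeShutdownMetadata.builder()
                .setNodeId("node-1")                              // illustrative
                .setReason("example shutdown")                    // illustrative
                .setStartedAtMillis(0L)
                .setType(SingleNodeShutdownMetadata.Type.RESTART)
                .build()
        );
    }
}
// Lookup then mirrors the assertions in these tests: oneRestartingNode().get("node-1") is non-null.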
- SingleNodeShutdownMetadata shutdown = SingleNodeShutdownMetadata.builder() - .setNodeId(lastNodeId) - .setReason(this.getTestName()) - .setStartedAtMillis(randomNonNegativeLong()) - .setType(SingleNodeShutdownMetadata.Type.RESTART) - .build(); - shutdowns.put(shutdown.getNodeId(), shutdown); + NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata( + SingleNodeShutdownMetadata.builder() + .setNodeId(lastNodeId) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .setType(SingleNodeShutdownMetadata.Type.RESTART) + .build() + ); // Use a different index-level delay so this test will fail if that one gets used instead of the one from the shutdown metadata checkRemainingDelayCalculation( @@ -717,23 +727,23 @@ public void testRemainingDelayCalculationWhenNodeIsKnownToBeRestartingWithDefaul ); } - public void testRemainingDelayUsesIndexLevelDelayIfNodeWasNotRestartingWhenShardBecameUnassigned() throws Exception { + public void testRemainingDelayUsesIndexLevelDelayIfNodeWasNotRestartingWhenShardBecameUnassigned() { String lastNodeId = "bogusNodeId"; - Map shutdowns = new HashMap<>(); // Generate a random time value - but don't use nanos as extremely small values of nanos can break assertion calculations final TimeValue shutdownDelay = TimeValue.parseTimeValue( randomTimeValue(100, 1000, "d", "h", "ms", "s", "m", "micros"), this.getTestName() ); - SingleNodeShutdownMetadata shutdown = SingleNodeShutdownMetadata.builder() - .setNodeId(lastNodeId) - .setReason(this.getTestName()) - .setStartedAtMillis(randomNonNegativeLong()) - .setType(SingleNodeShutdownMetadata.Type.RESTART) - .setAllocationDelay(shutdownDelay) - .build(); - shutdowns.put(shutdown.getNodeId(), shutdown); + NodesShutdownMetadata shutdowns = NodesShutdownMetadata.EMPTY.putSingleNodeMetadata( + SingleNodeShutdownMetadata.builder() + .setNodeId(lastNodeId) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .setType(SingleNodeShutdownMetadata.Type.RESTART) + .setAllocationDelay(shutdownDelay) + .build() + ); // We want an index level delay that's less than the shutdown delay to avoid picking the index-level delay because it's larger final TimeValue indexLevelDelay = randomValueOtherThanMany( @@ -748,10 +758,10 @@ public void testRemainingDelayUsesIndexLevelDelayIfNodeWasNotRestartingWhenShard private void checkRemainingDelayCalculation( String lastNodeId, TimeValue indexLevelTimeoutSetting, - Map nodeShutdowns, + NodesShutdownMetadata nodeShutdowns, TimeValue expectedTotalDelay, boolean nodeRestarting - ) throws Exception { + ) { final long baseTime = System.nanoTime(); UnassignedInfo unassignedInfo = new UnassignedInfo( nodeRestarting ? 
UnassignedInfo.Reason.NODE_RESTARTING : UnassignedInfo.Reason.NODE_LEFT, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 2464a3515ff1c..b183d46182cc0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfo.NodeAndShard; @@ -41,7 +39,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; @@ -70,6 +67,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; +import static org.elasticsearch.test.MockLogAppender.assertThatLogger; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; @@ -942,14 +940,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } }); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); - mockAppender.addExpectation(expectation); - - Logger logger = LogManager.getLogger(DesiredBalanceComputer.class); - Loggers.addAppender(logger, mockAppender); - - try { + assertThatLogger(() -> { var iteration = new AtomicInteger(0); desiredBalanceComputer.compute( DesiredBalance.INITIAL, @@ -957,12 +948,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing queue(), input -> iteration.incrementAndGet() < iterations ); - - mockAppender.assertAllExpectationsMatched(); - } finally { - Loggers.removeAppender(logger, mockAppender); - mockAppender.stop(); - } + }, DesiredBalanceComputer.class, expectation); } private static Map.Entry indexSize(ClusterState clusterState, String name, long size, boolean primary) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 7336c428bc70f..8363656295474 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.apache.logging.log4j.Level; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterInfo; @@ -62,6 +63,8 @@ import 
org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.BeforeClass; import java.util.Comparator; @@ -87,11 +90,14 @@ import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; +import static org.elasticsearch.test.MockLogAppender.assertThatLogger; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.oneOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class DesiredBalanceReconcilerTests extends ESAllocationTestCase { @@ -1083,8 +1089,7 @@ public void testRebalanceDoesNotCauseHotSpots() { new ConcurrentRebalanceAllocationDecider(clusterSettings), new ThrottlingAllocationDecider(clusterSettings) }; - var allocationOrdering = new NodeAllocationOrdering(); - var moveOrdering = new NodeAllocationOrdering(); + var reconciler = new DesiredBalanceReconciler(clusterSettings, mock(ThreadPool.class)); var totalOutgoingMoves = new HashMap(); for (int i = 0; i < numberOfNodes; i++) { @@ -1097,7 +1102,7 @@ public void testRebalanceDoesNotCauseHotSpots() { while (true) { var allocation = createRoutingAllocationFrom(clusterState, deciders); - new DesiredBalanceReconciler(balance, allocation, allocationOrdering, moveOrdering).run(); + reconciler.reconcile(balance, allocation); var initializing = shardsWithState(allocation.routingNodes(), ShardRoutingState.INITIALIZING); if (initializing.isEmpty()) { @@ -1124,8 +1129,52 @@ public void testRebalanceDoesNotCauseHotSpots() { } } + public void testShouldLogOnTooManyUndesiredAllocations() { + + var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(Version.CURRENT, 1, 0)).build(); + final var index = indexMetadata.getIndex(); + final var shardId = new ShardId(index, 0); + + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(newNode("data-node-1")).add(newNode("data-node-2"))) + .metadata(Metadata.builder().put(indexMetadata, true)) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, "data-node-2", true, STARTED))) + ) + .build(); + + final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(Set.of("data-node-1"), 1, 0, 0))); + + var threadPool = mock(ThreadPool.class); + when(threadPool.relativeTimeInMillis()).thenReturn(1L).thenReturn(2L); + + var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool); + + assertThatLogger( + () -> reconciler.reconcile(balance, createRoutingAllocationFrom(clusterState)), + DesiredBalanceReconciler.class, + new MockLogAppender.SeenEventExpectation( + "Should log first too many shards on undesired locations", + DesiredBalanceReconciler.class.getCanonicalName(), + Level.WARN, + "[100.0%] of assigned shards (1/1) are not on their desired nodes, which exceeds the 
warn threshold of [10.0%]" + ) + ); + assertThatLogger( + () -> reconciler.reconcile(balance, createRoutingAllocationFrom(clusterState)), + DesiredBalanceReconciler.class, + new MockLogAppender.UnseenEventExpectation( + "Should not log immediate second too many shards on undesired locations", + DesiredBalanceReconciler.class.getCanonicalName(), + Level.WARN, + "[100.0%] of assigned shards (1/1) are not on their desired nodes, which exceeds the warn threshold of [10.0%]" + ) + ); + } + private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { - new DesiredBalanceReconciler(desiredBalance, routingAllocation, new NodeAllocationOrdering(), new NodeAllocationOrdering()).run(); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), mock(ThreadPool.class)).reconcile(desiredBalance, routingAllocation); } private static boolean isReconciled(RoutingNode node, DesiredBalance balance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 676da408023d0..7718b9a871067 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -332,11 +332,12 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo var gatewayAllocator = createGatewayAllocator(); var shardsAllocator = createShardsAllocator(); + var clusterSettings = createBuiltInClusterSettings(); var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -433,11 +434,12 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo var gatewayAllocator = createGatewayAllocator(); var shardsAllocator = createShardsAllocator(); + var clusterSettings = createBuiltInClusterSettings(); var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -520,10 +522,10 @@ public void testResetDesiredBalance() { var threadPool = new TestThreadPool(getTestName()); var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); - var delegateAllocator = createShardsAllocator(); + var clusterSettings = createBuiltInClusterSettings(); - var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator) { + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator) { final AtomicReference lastComputationInput = new AtomicReference<>(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java 
new file mode 100644 index 0000000000000..0e730c3647db7 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/FrequencyCappedActionTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.core.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.equalTo; + +public class FrequencyCappedActionTests extends ESTestCase { + + public void testFrequencyCapExecution() { + + var executions = new AtomicLong(0); + var currentTime = new AtomicLong(); + var action = new FrequencyCappedAction(currentTime::get); + + var minInterval = timeValueMillis(randomNonNegativeInt()); + action.setMinInterval(minInterval); + + // initial execution should happen + action.maybeExecute(executions::incrementAndGet); + assertThat(executions.get(), equalTo(1L)); + + // should not execute again too soon + currentTime.set(randomLongBetween(0, minInterval.millis() - 1)); + action.maybeExecute(executions::incrementAndGet); + assertThat(executions.get(), equalTo(1L)); + + // should execute min interval elapsed + currentTime.set(randomLongBetween(minInterval.millis(), Long.MAX_VALUE)); + action.maybeExecute(executions::incrementAndGet); + assertThat(executions.get(), equalTo(2L)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java index 7d7f091e58f5c..203255cca32f7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java @@ -9,11 +9,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.EmptyClusterInfoService; -import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; @@ -26,14 +23,9 @@ import org.elasticsearch.cluster.routing.RoutingNodesHelper; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.snapshots.EmptySnapshotsInfoService; -import org.elasticsearch.test.gateway.TestGatewayAllocator; 
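Stepping outside the diff briefly: the behaviour FrequencyCappedActionTests pins down reads most clearly as a usage sketch. The clock and interval below are chosen for illustration, and we assume the class is visible from the caller's package; the constructor and method calls are exactly the ones the test makes. This appears to be the mechanism behind the rate-limited warning exercised in DesiredBalanceReconcilerTests above.

import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.cluster.routing.allocation.allocator.FrequencyCappedAction;
import org.elasticsearch.core.TimeValue;

class FrequencyCappedActionDemo {
    public static void main(String[] args) {
        // The action runs at most once per minInterval, judged against the injected time source.
        AtomicLong clock = new AtomicLong();
        AtomicLong runs = new AtomicLong();
        FrequencyCappedAction action = new FrequencyCappedAction(clock::get);
        action.setMinInterval(TimeValue.timeValueMinutes(1));

        action.maybeExecute(runs::incrementAndGet); // runs == 1: the first call always executes
        action.maybeExecute(runs::incrementAndGet); // still 1: the interval has not elapsed
        clock.addAndGet(TimeValue.timeValueMinutes(1).millis());
        action.maybeExecute(runs::incrementAndGet); // runs == 2: executes once the interval has passed
    }
}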
import java.util.Arrays; import java.util.Collections; @@ -63,14 +55,6 @@ public class NodeReplacementAllocationDeciderTests extends ESAllocationTestCase new NodeShutdownAllocationDecider() ) ); - private final AllocationService service = new AllocationService( - allocationDeciders, - new TestGatewayAllocator(), - new BalancedShardsAllocator(Settings.EMPTY), - EmptyClusterInfoService.INSTANCE, - EmptySnapshotsInfoService.INSTANCE, - TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY - ); private final String idxName = "test-idx"; private final String idxUuid = "test-idx-uuid"; @@ -98,11 +82,7 @@ public void testNoReplacements() { } public void testCanForceAllocate() { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - NODE_A.getId(), - NODE_B.getName() - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, NODE_A.getId(), NODE_B.getName()); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(NODE_A.getId(), NODE_A, shard); allocation.debugDecision(true); @@ -146,11 +126,7 @@ public void testCanForceAllocate() { } public void testCannotRemainOnReplacedNode() { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - NODE_A.getId(), - NODE_B.getName() - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, NODE_A.getId(), NODE_B.getName()); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(NODE_A.getId(), NODE_A, shard); allocation.debugDecision(true); @@ -176,11 +152,7 @@ public void testCannotRemainOnReplacedNode() { } public void testCanAllocateToNeitherSourceNorTarget() { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - NODE_A.getId(), - NODE_B.getName() - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, NODE_A.getId(), NODE_B.getName()); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(NODE_A.getId(), NODE_A, shard); allocation.debugDecision(true); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java index e25f5a87236bb..12f5192bec50a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java @@ -9,11 +9,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.EmptyClusterInfoService; -import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; @@ -26,15 +23,10 @@ import org.elasticsearch.cluster.routing.RoutingNodesHelper; import 
org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.snapshots.EmptySnapshotsInfoService; -import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; import java.util.Collections; @@ -58,14 +50,6 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase { private final AllocationDeciders allocationDeciders = new AllocationDeciders( Arrays.asList(decider, new SameShardAllocationDecider(clusterSettings), new ReplicaAfterPrimaryActiveAllocationDecider()) ); - private final AllocationService service = new AllocationService( - allocationDeciders, - new TestGatewayAllocator(), - new BalancedShardsAllocator(Settings.EMPTY), - EmptyClusterInfoService.INSTANCE, - EmptySnapshotsInfoService.INSTANCE, - TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY - ); private final String idxName = "test-idx"; private final String idxUuid = "test-idx-uuid"; @@ -80,10 +64,7 @@ public class NodeShutdownAllocationDeciderTests extends ESAllocationTestCase { ); public void testCanAllocateShardsToRestartingNode() { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - SingleNodeShutdownMetadata.Type.RESTART - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, SingleNodeShutdownMetadata.Type.RESTART); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -98,7 +79,7 @@ public void testCanAllocateShardsToRestartingNode() { public void testCannotAllocateShardsToRemovingNode() { for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) { - ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), type); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, type); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -110,10 +91,7 @@ public void testCannotAllocateShardsToRemovingNode() { } public void testShardsCanRemainOnRestartingNode() { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - SingleNodeShutdownMetadata.Type.RESTART - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, SingleNodeShutdownMetadata.Type.RESTART); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -128,7 +106,7 @@ public void testShardsCanRemainOnRestartingNode() { public void testShardsCannotRemainOnRemovingNode() { for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) { - ClusterState state = 
prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), type); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, type); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); RoutingNode routingNode = RoutingNodesHelper.routingNode(DATA_NODE.getId(), DATA_NODE, shard); allocation.debugDecision(true); @@ -144,10 +122,7 @@ public void testShardsCannotRemainOnRemovingNode() { } public void testCanAutoExpandToRestartingNode() { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - SingleNodeShutdownMetadata.Type.RESTART - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, SingleNodeShutdownMetadata.Type.RESTART); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); @@ -160,9 +135,7 @@ public void testCanAutoExpandToRestartingNode() { } public void testCanAutoExpandToNodeIfNoNodesShuttingDown() { - ClusterState state = service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()); - - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, ClusterState.EMPTY_STATE, null, null, 0); allocation.debugDecision(true); Decision decision = decider.shouldAutoExpandToNode(indexMetadata, DATA_NODE, allocation); @@ -172,11 +145,7 @@ public void testCanAutoExpandToNodeIfNoNodesShuttingDown() { public void testCanAutoExpandToNodeThatIsNotShuttingDown() { for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) { - ClusterState state = prepareState( - service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), - type, - "other-node-id" - ); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, type, "other-node-id"); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); @@ -189,7 +158,7 @@ public void testCanAutoExpandToNodeThatIsNotShuttingDown() { public void testCannotAutoExpandToRemovingNode() { for (SingleNodeShutdownMetadata.Type type : REMOVE_SHUTDOWN_TYPES) { - ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state", ActionListener.noop()), type); + ClusterState state = prepareState(ClusterState.EMPTY_STATE, type); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0); allocation.debugDecision(true); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java index 844f5c9cf48ce..20f2665f63ec7 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/KeyedLockTests.java @@ -29,7 +29,7 @@ public class KeyedLockTests extends ESTestCase { public void testIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedException { ConcurrentHashMap counter = new ConcurrentHashMap<>(); ConcurrentHashMap safeCounter = new ConcurrentHashMap<>(); - KeyedLock connectionLock = new KeyedLock<>(randomBoolean()); + KeyedLock connectionLock = new KeyedLock<>(); String[] names = new String[randomIntBetween(1, 40)]; for (int i = 0; i < names.length; i++) { names[i] = randomRealisticUnicodeOfLengthBetween(10, 20); diff --git 
a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index 5debe7d2fa9d2..e0fc9f75ce1b2 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -101,7 +101,7 @@ public void testVectorHighlighterPrefixQuery() throws Exception { ); assertThat(fragment, nullValue()); prefixQuery = new PrefixQuery(new Term("content", "ba"), MultiTermQuery.SCORING_BOOLEAN_REWRITE); - Query rewriteQuery = prefixQuery.rewrite(reader); + Query rewriteQuery = prefixQuery.rewrite(searcher); fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery), reader, topDocs.scoreDocs[0].doc, "content", 30); assertThat(fragment, notNullValue()); diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index a70373a68600a..bb443d59ac111 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; @@ -51,6 +52,7 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.mockito.ArgumentCaptor; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -69,12 +71,16 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; import static org.elasticsearch.http.AbstractHttpServerTransport.resolvePublishPort; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; public class AbstractHttpServerTransportTests extends ESTestCase { @@ -883,6 +889,66 @@ protected void stopInternal() {} } } + @SuppressWarnings("unchecked") + public void testSetGracefulClose() { + try ( + AbstractHttpServerTransport transport = new AbstractHttpServerTransport( + Settings.EMPTY, + networkService, + recycler, + threadPool, + xContentRegistry(), + new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(emptyResponse(RestStatus.OK)); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + channel.sendResponse(emptyResponse(RestStatus.BAD_REQUEST)); + } + }, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + Tracer.NOOP + ) { + + @Override + protected HttpServerChannel bind(InetSocketAddress hostAddress) { + return null; + } + + @Override + protected void doStart() {} + + @Override + protected void stopInternal() {} + } + ) { + final TestHttpRequest 
httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/");
+
+            HttpChannel httpChannel = mock(HttpChannel.class);
+            transport.incomingRequest(httpRequest, httpChannel);
+
+            var response = ArgumentCaptor.forClass(TestHttpResponse.class);
+            var listener = ArgumentCaptor.forClass(ActionListener.class);
+            verify(httpChannel).sendResponse(response.capture(), listener.capture());
+
+            listener.getValue().onResponse(null);
+            assertThat(response.getValue().containsHeader(DefaultRestChannel.CONNECTION), is(false));
+            verify(httpChannel, never()).close();
+
+            httpChannel = mock(HttpChannel.class);
+            transport.gracefullyCloseConnections();
+            transport.incomingRequest(httpRequest, httpChannel);
+            verify(httpChannel).sendResponse(response.capture(), listener.capture());
+
+            listener.getValue().onResponse(null);
+            assertThat(response.getValue().headers().get(DefaultRestChannel.CONNECTION), containsInAnyOrder(DefaultRestChannel.CLOSE));
+            verify(httpChannel).close();
+        }
+    }
+
 private static RestResponse emptyResponse(RestStatus status) {
     return new RestResponse(status, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY);
 }
diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java
index ffe95b0004743..d050c2432025b 100644
--- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java
+++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java
@@ -172,7 +172,8 @@ public void testHeadersSet() {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(settings),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
     RestResponse resp = testRestResponse();
     final String customHeader = "custom-header";
@@ -192,6 +193,65 @@ public void testHeadersSet() {
         assertEquals(resp.contentType(), headers.get(DefaultRestChannel.CONTENT_TYPE).get(0));
     }
 
+    public void testCloseConnection() {
+        Settings settings = Settings.builder().build();
+        final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/");
+        final RestRequest request = RestRequest.request(parserConfig(), httpRequest, httpChannel);
+        HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings);
+        // send a response
+        DefaultRestChannel channel = new DefaultRestChannel(
+            httpChannel,
+            httpRequest,
+            request,
+            bigArrays,
+            handlingSettings,
+            threadPool.getThreadContext(),
+            CorsHandler.fromSettings(settings),
+            httpTracer,
+            tracer,
+            true
+        );
+
+        RestResponse resp = testRestResponse();
+        channel.sendResponse(resp);
+        // inspect what was written
+        ArgumentCaptor<TestHttpResponse> responseCaptor = ArgumentCaptor.forClass(TestHttpResponse.class);
+        verify(httpChannel).sendResponse(responseCaptor.capture(), any());
+        TestHttpResponse httpResponse = responseCaptor.getValue();
+        Map<String, List<String>> headers = httpResponse.headers();
+        assertThat(headers.get(DefaultRestChannel.CONNECTION), containsInAnyOrder(DefaultRestChannel.CLOSE));
+    }
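Together, testCloseConnection and the following testNormallyNoConnectionClose pin down the graceful-close contract: with the new boolean set, the response must carry a Connection: close header and the channel is closed once the response listener fires; otherwise neither happens. A sketch of that contract as a standalone helper, where the helper itself is ours and only the constants and the channel API come from the code above (we assume the CONNECTION/CLOSE constants are accessible to the caller, as they are to these same-package tests):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.http.DefaultRestChannel;
import org.elasticsearch.http.HttpChannel;
import org.elasticsearch.http.HttpResponse;

final class GracefulClose {
    // Hypothetical helper: when the server is draining connections, advertise the close
    // and tear the connection down only after the response has been handed off.
    static void respond(HttpChannel channel, HttpResponse response, boolean closeConnection) {
        if (closeConnection) {
            response.addHeader(DefaultRestChannel.CONNECTION, DefaultRestChannel.CLOSE); // "connection: close"
        }
        channel.sendResponse(response, ActionListener.wrap(() -> {
            if (closeConnection) {
                channel.close(); // the client must reconnect, ideally to another node
            }
        }));
    }
}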
+
+    public void testNormallyNoConnectionClose() {
+        Settings settings = Settings.builder().build();
+        final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/");
+        final RestRequest request = RestRequest.request(parserConfig(), httpRequest, httpChannel);
+        HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings);
+        // send a response
+        DefaultRestChannel channel = new DefaultRestChannel(
+            httpChannel,
+            httpRequest,
+            request,
+            bigArrays,
+            handlingSettings,
+            threadPool.getThreadContext(),
+            CorsHandler.fromSettings(settings),
+            httpTracer,
+            tracer,
+            false
+        );
+
+        RestResponse resp = testRestResponse();
+        channel.sendResponse(resp);
+
+        ArgumentCaptor<TestHttpResponse> responseCaptor = ArgumentCaptor.forClass(TestHttpResponse.class);
+        verify(httpChannel).sendResponse(responseCaptor.capture(), any());
+
+        TestHttpResponse httpResponse = responseCaptor.getValue();
+        Map<String, List<String>> headers = httpResponse.headers();
+        assertNull(headers.get(DefaultRestChannel.CONNECTION));
+    }
+
 public void testCookiesSet() {
     Settings settings = Settings.builder().put(HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.getKey(), true).build();
     final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/");
@@ -209,7 +269,8 @@ public void testCookiesSet() {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(settings),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
 
     channel.sendResponse(testRestResponse());
@@ -238,7 +299,8 @@ public void testReleaseInListener() throws IOException {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(settings),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
     final RestResponse response = new RestResponse(
         RestStatus.INTERNAL_SERVER_ERROR,
@@ -306,7 +368,8 @@ public void testConnectionClose() throws Exception {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(settings),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
     channel.sendResponse(testRestResponse());
     Class<ActionListener<Void>> listenerClass = (Class<ActionListener<Void>>) (Class) ActionListener.class;
@@ -338,7 +401,8 @@ public void testResponseHeadersFiltering() {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(Settings.EMPTY),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
     doAnswer(invocationOnMock -> {
         ActionListener<Void> listener = invocationOnMock.getArgument(1);
@@ -385,7 +449,8 @@ public RestRequest.Method method() {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(Settings.EMPTY),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
 
     // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released
@@ -432,7 +497,8 @@ public HttpResponse createResponse(RestStatus status, BytesReference content) {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(Settings.EMPTY),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
 
     // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released
@@ -481,7 +547,8 @@ public void testHandleHeadRequest() {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(Settings.EMPTY),
         httpTracer,
-        tracer
+        tracer,
+        false
     );
     ArgumentCaptor<HttpResponse> requestCaptor = ArgumentCaptor.forClass(HttpResponse.class);
     {
@@ -541,7 +608,8 @@ public void sendResponse(HttpResponse response, ActionListener<Void> listener) {
         threadPool.getThreadContext(),
         new CorsHandler(CorsHandler.buildConfig(Settings.EMPTY)),
         new HttpTracer(),
-        tracer
+        tracer,
+        false
     );
 
     final MockLogAppender sendingResponseMockLog = new MockLogAppender();
@@ -603,7 +671,8 @@ public void sendResponse(HttpResponse response, ActionListener<Void> listener) {
         threadPool.getThreadContext(),
         new CorsHandler(CorsHandler.buildConfig(Settings.EMPTY)),
         new HttpTracer(),
-        tracer
+        tracer,
+        false
     );
 
     MockLogAppender mockLogAppender = new MockLogAppender();
@@ -659,7 +728,8 @@ public HttpResponse createResponse(RestStatus status, ChunkedRestResponseBody content) {
         threadPool.getThreadContext(),
         CorsHandler.fromSettings(Settings.EMPTY),
         new HttpTracer(),
-        tracer
+        tracer,
+        false
     );
 
     var responseBody = new
BytesArray(randomUnicodeOfLengthBetween(1, 100).getBytes(StandardCharsets.UTF_8)); @@ -729,7 +799,8 @@ private TestHttpResponse executeRequest(final Settings settings, final String or threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(settings)), httpTracer, - tracer + tracer, + false ); channel.sendResponse(testRestResponse()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 1faaae09c3a4c..28562525d1500 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; @@ -691,7 +692,7 @@ public void onIndexCommitDelete(ShardId shardId, IndexCommit deletedCommit) { IndexService indexService = newIndexService(module); closeables.add(() -> indexService.close("close index service at end of test", false)); - IndexShard indexShard = indexService.createShard(shardRouting, s -> {}, RetentionLeaseSyncer.EMPTY); + IndexShard indexShard = indexService.createShard(shardRouting, IndexShardTestCase.NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY); closeables.add(() -> indexShard.close("close shard at end of test", true)); indexShard.markAsRecovering("test", new RecoveryState(shardRouting, TestDiscoveryNode.create("_node_id", "_node_id"), null)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index be8929417622b..7023f1e946ae5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -18,6 +18,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -251,11 +252,17 @@ public void testRangeQuery() throws IOException { LongPoint.newRangeQuery("field", instant1, instant2), SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) ); - assertEquals(expected, ft.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new MultiReader())); + assertEquals( + expected, + ft.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new IndexSearcher(new MultiReader())) + ); MappedFieldType ft2 = new DateFieldType("field", false); Query expected2 = SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2); - assertEquals(expected2, ft2.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new MultiReader())); + assertEquals( + expected2, + ft2.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new IndexSearcher(new MultiReader())) + ); instant1 = nowInMillis; instant2 = instant1 + 100; diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index 448b55eeea0e2..421839701b634 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -21,6 +21,8 @@ import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; import java.util.ArrayList; @@ -28,10 +30,12 @@ import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.hamcrest.Matchers.containsString; @@ -345,4 +349,104 @@ public void testTooManyDimensionFields() { }))); assertThat(e.getMessage(), containsString("Limit of total dimension fields [" + max + "] has been exceeded")); } + + public void testDeeplyNestedMapping() throws Exception { + final int maxDepth = INDEX_MAPPING_DEPTH_LIMIT_SETTING.get(Settings.EMPTY).intValue(); + { + // test that the depth limit is enforced for object field + XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); + for (int i = 0; i < maxDepth + 5; i++) { + builder.startObject("obj" + i); + builder.startObject("properties"); + } + builder.startObject("foo").field("type", "keyword").endObject(); + for (int i = 0; i < maxDepth + 5; i++) { + builder.endObject(); + builder.endObject(); + } + builder.endObject().endObject().endObject(); + + MapperParsingException exc = expectThrows( + MapperParsingException.class, + () -> createMapperService(Settings.builder().put(getIndexSettings()).build(), builder) + ); + assertThat(exc.getMessage(), containsString("Limit of mapping depth [" + maxDepth + "] has been exceeded")); + } + + { + // test that the limit is per individual field, so several object fields don't trip the limit + XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); + for (int i = 0; i < maxDepth - 3; i++) { + builder.startObject("obj" + i); + builder.startObject("properties"); + } + + for (int i = 0; i < 2; i++) { + builder.startObject("sub_obj1" + i); + builder.startObject("properties"); + } + builder.startObject("foo").field("type", "keyword").endObject(); + for (int i = 0; i < 2; i++) { + builder.endObject(); + builder.endObject(); + } + + for (int i = 0; i < 2; i++) { + builder.startObject("sub_obj2" + i); + builder.startObject("properties"); + } + builder.startObject("foo2").field("type", "keyword").endObject(); + for (int i = 0; i < 2; i++) { + builder.endObject(); + builder.endObject(); + } + + for (int i = 0; i < maxDepth - 3; i++) { + builder.endObject(); + builder.endObject(); + } + builder.endObject().endObject().endObject(); + + createMapperService(Settings.builder().put(getIndexSettings()).build(), builder); + } + { + // test that parsing correct objects in parallel 
using the same MapperService doesn't trip the limit + final int numThreads = randomIntBetween(2, 5); + final XContentBuilder[] builders = new XContentBuilder[numThreads]; + + for (int i = 0; i < numThreads; i++) { + builders[i] = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); + for (int j = 0; j < maxDepth - 1; j++) { + builders[i].startObject("obj" + i + "_" + j); + builders[i].startObject("properties"); + } + builders[i].startObject("foo").field("type", "keyword").endObject(); + for (int j = 0; j < maxDepth - 1; j++) { + builders[i].endObject(); + builders[i].endObject(); + } + builders[i].endObject().endObject().endObject(); + } + + final MapperService mapperService = createMapperService(Version.CURRENT, Settings.EMPTY, () -> false); + final CountDownLatch latch = new CountDownLatch(1); + final Thread[] threads = new Thread[numThreads]; + for (int i = 0; i < threads.length; i++) { + final int threadId = i; + threads[threadId] = new Thread(() -> { + try { + latch.await(); + mapperService.parseMapping("_doc", new CompressedXContent(Strings.toString(builders[threadId]))); + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[threadId].start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index da5df1ae8c04c..400dce5de719b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -27,6 +27,7 @@ import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; +import java.util.function.Supplier; public class MappingParserTests extends MapperServiceTestCase { @@ -40,7 +41,7 @@ private static MappingParser createMappingParser(Settings settings, Version vers IndexAnalyzers indexAnalyzers = createIndexAnalyzers(); SimilarityService similarityService = new SimilarityService(indexSettings, scriptService, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - MappingParserContext mappingParserContext = new MappingParserContext( + Supplier<MappingParserContext> mappingParserContextSupplier = () -> new MappingParserContext( similarityService::getSimilarity, type -> mapperRegistry.getMapperParser(type, indexSettings.getIndexVersionCreated()), mapperRegistry.getRuntimeFieldParsers()::get, @@ -54,17 +55,18 @@ private static MappingParser createMappingParser(Settings settings, Version vers indexSettings, indexSettings.getMode().idFieldMapperWithoutFieldData() ); + Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers = mapperRegistry.getMetadataMapperParsers( indexSettings.getIndexVersionCreated() ); Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = new LinkedHashMap<>(); - metadataMapperParsers.values().stream().map(parser -> parser.getDefault(mappingParserContext)).forEach(m -> { + metadataMapperParsers.values().stream().map(parser -> parser.getDefault(mappingParserContextSupplier.get())).forEach(m -> { if (m != null) { metadataMappers.put(m.getClass(), m); } }); return new MappingParser( - mappingParserContext, + mappingParserContextSupplier, metadataMapperParsers, () -> metadataMappers, type -> MapperService.SINGLE_MAPPING_NAME diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index bea3d4f6f5b97..844026b96ee31 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -10,8 +10,11 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -71,6 +74,16 @@ public void testMultiFieldWithinMultiField() throws IOException { MapperService mapperService = mock(MapperService.class); IndexAnalyzers indexAnalyzers = IndexAnalyzers.of(defaultAnalyzers()); when(mapperService.getIndexAnalyzers()).thenReturn(indexAnalyzers); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetadata metadata = IndexMetadata.builder("test").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(metadata, Settings.EMPTY); + when(mapperService.getIndexSettings()).thenReturn(indexSettings); + Version olderVersion = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); MappingParserContext olderContext = new MappingParserContext( null, diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index f040fd42f772a..46f5defa99e5d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -267,8 +267,9 @@ public void testTermExpansionExceptionOnSpanFailure() throws Exception { BooleanQuery.setMaxClauseCount(1); try { QueryBuilder queryBuilder = new SpanMultiTermQueryBuilder(QueryBuilders.prefixQuery("body", "bar")); - Query query = queryBuilder.toQuery(createSearchExecutionContext(new IndexSearcher(reader))); - RuntimeException exc = expectThrows(RuntimeException.class, () -> query.rewrite(reader)); + IndexSearcher searcher = new IndexSearcher(reader); + Query query = queryBuilder.toQuery(createSearchExecutionContext(searcher)); + RuntimeException exc = expectThrows(RuntimeException.class, () -> query.rewrite(searcher)); assertThat(exc.getMessage(), containsString("maxClauseCount")); } finally { BooleanQuery.setMaxClauseCount(origBoolMaxClauseCount); diff --git a/server/src/test/java/org/elasticsearch/index/search/ESToParentBlockJoinQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/ESToParentBlockJoinQueryTests.java index 3868e87efa373..b7349f1033a5e 100644 --- a/server/src/test/java/org/elasticsearch/index/search/ESToParentBlockJoinQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/ESToParentBlockJoinQueryTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; 
import org.apache.lucene.search.TermQuery; @@ -88,7 +89,7 @@ public void testRewrite() throws IOException { ScoreMode.Avg, "nested" ); - Query rewritten = q.rewrite(new MultiReader()); + Query rewritten = q.rewrite(new IndexSearcher(new MultiReader())); assertEquals(expected, rewritten); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 69b2ff769d710..934bd54720ed2 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -27,8 +27,13 @@ import org.elasticsearch.transport.TransportService; import java.util.Collections; +import java.util.function.Consumer; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; @@ -100,6 +105,11 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint); + doAnswer(invocation -> { + Consumer<Exception> argument = invocation.getArgument(1); + argument.accept(null); + return null; + }).when(indexShard).syncGlobalCheckpoint(anyLong(), any()); final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction( Settings.EMPTY, @@ -123,9 +133,10 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { if (durability == Translog.Durability.ASYNC || lastSyncedGlobalCheckpoint == globalCheckpoint) { verify(indexShard, never()).sync(); + verify(indexShard, never()).syncGlobalCheckpoint(anyLong(), any()); } else { - verify(indexShard).sync(); + verify(indexShard, never()).sync(); + verify(indexShard).syncGlobalCheckpoint(eq(globalCheckpoint), any()); } } - } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java index 3da0a5fca7f6a..d9ef36f3927e8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java @@ -476,8 +476,8 @@ public void onFailure(Exception e) { */ assertBusy(() -> { for (final ThreadPoolStats.Stats stats : threadPool.stats()) { - if (ThreadPool.Names.GENERIC.equals(stats.getName())) { - assertThat("Expected no active threads in GENERIC pool", stats.getActive(), equalTo(0)); + if (ThreadPool.Names.GENERIC.equals(stats.name())) { + assertThat("Expected no active threads in GENERIC pool", stats.active(), equalTo(0)); return; } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e9fefbfcce942..2317b8a010e3f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1185,7 +1185,7 @@ public void testGlobalCheckpointSync() throws IOException { 
indexMetadata.build(), null, new InternalEngineFactory(), - () -> synced.set(true), + ignoredShardId -> synced.set(true), RetentionLeaseSyncer.EMPTY ); // add a replica @@ -1254,7 +1254,7 @@ public void testClosedIndicesSkipSyncGlobalCheckpoint() throws Exception { indexMetadata.build(), null, new InternalEngineFactory(), - () -> synced.set(true), + ignoredShardId -> synced.set(true), RetentionLeaseSyncer.EMPTY ); recoverShardFromStore(primaryShard); @@ -1534,6 +1534,69 @@ public void run() { closeShards(shard); } + public void testAsyncPersistGlobalCheckpointSync() throws InterruptedException, IOException { + final ShardId shardId = new ShardId("index", "_na_", 0); + final ShardRouting shardRouting = TestShardRouting.newShardRouting( + shardId, + randomAlphaOfLength(8), + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + final Settings settings = indexSettings(Version.CURRENT, 1, 2).build(); + final IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); + IndexShard shard = newShard( + shardRouting, + indexMetadata.build(), + null, + new InternalEngineFactory(), + ignoredShardId -> {}, + RetentionLeaseSyncer.EMPTY + ); + recoverShardFromStore(shard); + + final int maxSeqNo = randomIntBetween(0, 128); + for (int i = 0; i <= maxSeqNo; i++) { + EngineTestCase.generateNewSeqNo(shard.getEngine()); + } + final long checkpoint = rarely() ? maxSeqNo - scaledRandomIntBetween(0, maxSeqNo) : maxSeqNo; + shard.updateLocalCheckpointForShard(shardRouting.allocationId().getId(), checkpoint); + shard.updateGlobalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getLocalCheckpoint()); + + Semaphore semaphore = new Semaphore(Integer.MAX_VALUE); + Thread[] thread = new Thread[randomIntBetween(3, 5)]; + CountDownLatch latch = new CountDownLatch(thread.length); + for (int i = 0; i < thread.length; i++) { + thread[i] = new Thread() { + @Override + public void run() { + try { + latch.countDown(); + latch.await(); + for (int i = 0; i < 10000; i++) { + semaphore.acquire(); + shard.syncGlobalCheckpoint( + randomLongBetween(0, shard.getLastKnownGlobalCheckpoint()), + (ex) -> semaphore.release() + ); + } + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + }; + thread[i].start(); + } + + for (int i = 0; i < thread.length; i++) { + thread[i].join(); + } + assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); + assertEquals(shard.getLastKnownGlobalCheckpoint(), shard.getLastSyncedGlobalCheckpoint()); + + closeShards(shard); + } + public void testShardStats() throws IOException { IndexShard shard = newStartedShard(); @@ -1630,7 +1693,7 @@ public String[] listAll() throws IOException { i -> store, null, new InternalEngineFactory(), - () -> {}, + NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER ); @@ -2622,7 +2685,7 @@ public void testReaderWrapperIsUsed() throws IOException { null, wrapper, new InternalEngineFactory(), - () -> {}, + NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER ); @@ -2757,7 +2820,7 @@ public void testSearchIsReleaseIfWrapperFails() throws IOException { null, wrapper, new InternalEngineFactory(), - () -> {}, + NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER ); @@ -4538,7 +4601,7 @@ public void testShardExposesWriteLoadStats() throws Exception { null, null, new InternalEngineFactory(), - () -> {}, + NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER, fakeClock 
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 19d03086c4630..682fd9fce526b 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -170,7 +170,7 @@ public void setup() throws IOException { null, null, new InternalEngineFactory(), - () -> {}, + NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER ), diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 790fed4621683..85db41f745543 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -1024,7 +1024,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep fail("duplicate op [" + op + "], old entry at " + location); } if (id % writers.length == threadId) { - translog.ensureSynced(location); + translog.ensureSynced(location, SequenceNumbers.UNASSIGNED_SEQ_NO); } if (id % flushEveryOps == 0) { synchronized (flushMutex) { @@ -1162,67 +1162,72 @@ protected void doRun() throws Exception { logger.info("--> test done. total ops written [{}]", writtenOps.size()); } - public void testSyncUpTo() throws IOException { - int translogOperations = randomIntBetween(10, 100); - int count = 0; - for (int op = 0; op < translogOperations; op++) { - int seqNo = ++count; - final Translog.Location location = translog.add(TranslogOperationsUtils.indexOp("" + op, seqNo, primaryTerm.get())); - if (randomBoolean()) { - assertTrue("at least one operation pending", translog.syncNeeded()); - assertTrue("this operation has not been synced", translog.ensureSynced(location)); - // we are the last location so everything should be synced - assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); - seqNo = ++count; - translog.add(TranslogOperationsUtils.indexOp("" + op, seqNo, primaryTerm.get())); - assertTrue("one pending operation", translog.syncNeeded()); - assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now - assertTrue("we only synced a previous operation yet", translog.syncNeeded()); - } - if (rarely()) { - translog.rollGeneration(); - assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now - assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); - } - - if (randomBoolean()) { - translog.sync(); - assertFalse("translog has been synced already", translog.ensureSynced(location)); - } - } - } + public void testSyncUpToLocationAndCheckpoint() throws IOException { + assertFalse( + "translog empty location and no ops performed will not require sync", + translog.ensureSynced(Location.EMPTY, SequenceNumbers.UNASSIGNED_SEQ_NO) + ); - public void testSyncUpToStream() throws IOException { - int iters = randomIntBetween(5, 10); + int iters = randomIntBetween(25, 50); + Location alreadySynced = Location.EMPTY; + long alreadySyncedCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; for (int i = 0; i < iters; i++) { int translogOperations = randomIntBetween(10, 100); int count = 0; - ArrayList<Location> locations = new ArrayList<>(); + + 
Location location = null; + final ArrayList<Location> locations = new ArrayList<>(); + final ArrayList<Location> locationsInCurrentGeneration = new ArrayList<>(); for (int op = 0; op < translogOperations; op++) { if (rarely()) { translog.rollGeneration(); + locationsInCurrentGeneration.clear(); } - final Translog.Location location = translog.add(indexOp("" + op, op, primaryTerm.get(), Integer.toString(++count))); + location = translog.add(indexOp("" + op, op, primaryTerm.get(), Integer.toString(++count))); + globalCheckpoint.incrementAndGet(); locations.add(location); + locationsInCurrentGeneration.add(location); } - Collections.shuffle(locations, random()); + + assertFalse("should have been synced on previous iteration", translog.ensureSynced(alreadySynced, alreadySyncedCheckpoint)); + if (randomBoolean()) { assertTrue("at least one operation pending", translog.syncNeeded()); - assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream())); - // we are the last location so everything should be synced + if (randomBoolean()) { + Location randomLocationToSync = locationsInCurrentGeneration.get(randomInt(locationsInCurrentGeneration.size() - 1)); + assertTrue( + "this operation has not been synced", + translog.ensureSynced(randomLocationToSync, SequenceNumbers.UNASSIGNED_SEQ_NO) + ); + } else { + long globalCheckpointToSync = randomLongBetween(translog.getLastSyncedGlobalCheckpoint() + 1, globalCheckpoint.get()); + assertTrue( + "this global checkpoint has not been persisted", + translog.ensureSynced(Location.EMPTY, globalCheckpointToSync) + ); + } + // everything should be synced assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); } else if (rarely()) { translog.rollGeneration(); // not syncing now - assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); + assertFalse( + "location is from a previous translog - already synced", + translog.ensureSynced(location, globalCheckpoint.get()) + ); assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); } else { translog.sync(); - assertFalse("translog has been synced already", translog.ensureSynced(locations.stream())); + assertFalse("translog has been synced already", translog.ensureSynced(location, globalCheckpoint.get())); } - for (Location location : locations) { - assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); + + Collections.shuffle(locations, random()); + for (Location l : locations) { + assertFalse("all of the locations should be synced: " + l, translog.ensureSynced(l, SequenceNumbers.UNASSIGNED_SEQ_NO)); } + + alreadySynced = location; + alreadySyncedCheckpoint = globalCheckpoint.get(); } } @@ -2550,7 +2555,7 @@ public void testTragicEventCanBeAnyException() throws IOException { try { Translog.Location location = translog.add(indexOp("2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString())); if (randomBoolean()) { - translog.ensureSynced(location); + translog.ensureSynced(location, SequenceNumbers.UNASSIGNED_SEQ_NO); } else { translog.sync(); } @@ -3888,7 +3893,11 @@ public void testSyncConcurrently() throws Exception { long globalCheckpoint = lastGlobalCheckpoint.get(); final boolean synced; if (randomBoolean()) { - synced = translog.ensureSynced(location); + if (randomBoolean()) { + synced = translog.ensureSynced(location, globalCheckpoint); + } else { + synced = translog.ensureSynced(location, SequenceNumbers.UNASSIGNED_SEQ_NO); + } } 
else { translog.sync(); synced = true; diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 6e552cf5bfae9..2581fd9c43901 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -410,7 +410,7 @@ EngineConfig configWithRefreshListener(EngineConfig config, ReferenceManager.Ref ThreadPoolStats.Stats getRefreshThreadPoolStats() { final ThreadPoolStats stats = threadPool.stats(); for (ThreadPoolStats.Stats s : stats) { - if (s.getName().equals(ThreadPool.Names.REFRESH)) { + if (s.name().equals(ThreadPool.Names.REFRESH)) { return s; } } @@ -468,12 +468,12 @@ protected long getShardWritingBytes(IndexShard shard) { } assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.getCompleted(), equalTo(beforeStats.getCompleted() + iterations - 1)); + assertThat(stats.completed(), equalTo(beforeStats.completed() + iterations - 1)); }); refreshLatch.get().countDown(); // allow refresh assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.getCompleted(), equalTo(beforeStats.getCompleted() + iterations)); + assertThat(stats.completed(), equalTo(beforeStats.completed() + iterations)); }); assertThat(shard.refreshStats().getTotal(), equalTo(refreshStats.getTotal() + 1)); closeShards(shard); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index d5209c936c309..7e54ceca2bc5a 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -117,7 +117,7 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting = newRouting.moveToUnassigned(unassignedInfo) .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); - IndexShard shard = index.createShard(newRouting, s -> {}, RetentionLeaseSyncer.EMPTY); + IndexShard shard = index.createShard(newRouting, IndexShardTestCase.NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); final DiscoveryNode localNode = TestDiscoveryNode.create("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet()); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 2bafa77aca2fa..9e1bcf10a8ab4 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; +import org.elasticsearch.index.shard.GlobalCheckpointSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.index.shard.IndexShardState; @@ -234,7 +235,7 @@ public MockIndexShard createShard( final PeerRecoveryTargetService.RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer<IndexShard.ShardFailure> onShardFailure, - final Consumer<ShardId> globalCheckpointSyncer, + final GlobalCheckpointSyncer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java index 10e822c99e984..ab25465e77bd2 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java @@ -26,10 +26,12 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING; import static org.elasticsearch.indices.recovery.RecoverySettings.DEFAULT_FACTOR_VALUE; import static org.elasticsearch.indices.recovery.RecoverySettings.DEFAULT_MAX_BYTES_PER_SEC; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; @@ -44,6 +46,9 @@ import static org.elasticsearch.indices.recovery.RecoverySettings.NODE_BANDWIDTH_RECOVERY_SETTINGS; import static org.elasticsearch.indices.recovery.RecoverySettings.TOTAL_PHYSICAL_MEMORY_OVERRIDING_TEST_SETTING; import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; +import static org.elasticsearch.test.MockLogAppender.LoggingExpectation; +import static org.elasticsearch.test.MockLogAppender.SeenEventExpectation; +import static org.elasticsearch.test.MockLogAppender.assertThatLogger; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -100,6 +105,87 @@ public void testSnapshotDownloadPermitCanBeDynamicallyUpdated() { permit.close(); } + public void testInsufficientNumberOfPermitsMessage() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RecoverySettings recoverySettings = new RecoverySettings( + Settings.builder() + .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5) + .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 2) + .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), 3) + .build(), + clusterSettings + ); + + final String expectedMessage = String.format( + Locale.ROOT, + """ + Unable to acquire permit to use snapshot files during recovery, so this recovery will recover index files from \ + the source node. Ensure snapshot files can be used during recovery by setting [%s] to be no greater than [2]. 
\ + Current values of [%s] = [5], [%s] = [2] + """, + INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), + INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey() + ); + + final LoggingExpectation expectation = new SeenEventExpectation( + "WARN-Capture", + RecoverySettings.class.getCanonicalName(), + Level.WARN, + expectedMessage + ); + + assertThatLogger(() -> { + // Allow the first recovery to obtain a permit + Releasable permit = recoverySettings.tryAcquireSnapshotDownloadPermits(); + assertThat(permit, is(notNullValue())); + + // Deny the second recovery a permit + assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); + + }, RecoverySettings.class, expectation); + } + + public void testTooManyRecoveriesSettingsMessage() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RecoverySettings recoverySettings = new RecoverySettings( + Settings.builder() + .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), 5) + .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 20) + .put(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), 3) + .build(), + clusterSettings + ); + + final String expectedMessage = String.format( + Locale.ROOT, + """ + Unable to acquire permit to use snapshot files during recovery, so this recovery will recover index files from \ + the source node. Ensure snapshot files can be used during recovery by reducing [%s] from its current value of \ + [20] to be no greater than [5], or disable snapshot-based recovery by setting [%s] to [false] + """, + CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), + INDICES_RECOVERY_USE_SNAPSHOTS_SETTING.getKey() + ); + + final LoggingExpectation expectation = new SeenEventExpectation( + "WARN-Capture", + RecoverySettings.class.getCanonicalName(), + Level.WARN, + expectedMessage + ); + + assertThatLogger(() -> { + // Allow the first recovery to obtain a permit + Releasable permit = recoverySettings.tryAcquireSnapshotDownloadPermits(); + assertThat(permit, is(notNullValue())); + + // Deny the second recovery a permit + assertThat(recoverySettings.tryAcquireSnapshotDownloadPermits(), is(nullValue())); + + }, RecoverySettings.class, expectation); + } + public void testMaxConcurrentSnapshotFileDownloadsPerNodeIsValidated() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); Settings settings = Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java index 8803bdbeab5f9..fe4bbe97a53de 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java @@ -13,10 +13,13 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsInAnyOrder; + public class IngestStatsTests extends ESTestCase { public void testSerialization() throws IOException { @@ -28,6 +31,110 @@ public void testSerialization() throws IOException { assertIngestStats(ingestStats, serializedStats); } + public void testStatsMerge() { + var first = 
randomStats(); + var second = randomStats(); + assertEquals( + new IngestStats.Stats( + first.ingestCount() + second.ingestCount(), + first.ingestTimeInMillis() + second.ingestTimeInMillis(), + first.ingestCurrent() + second.ingestCurrent(), + first.ingestFailedCount() + second.ingestFailedCount() + ), + IngestStats.Stats.merge(first, second) + ); + } + + public void testPipelineStatsMerge() { + var first = List.of( + randomPipelineStat("pipeline-1"), + randomPipelineStat("pipeline-1"), + randomPipelineStat("pipeline-2"), + randomPipelineStat("pipeline-3"), + randomPipelineStat("pipeline-5") + ); + var second = List.of( + randomPipelineStat("pipeline-2"), + randomPipelineStat("pipeline-1"), + randomPipelineStat("pipeline-4"), + randomPipelineStat("pipeline-3") + ); + + assertThat( + IngestStats.PipelineStat.merge(first, second), + containsInAnyOrder( + new IngestStats.PipelineStat("pipeline-1", merge(first.get(0).stats(), first.get(1).stats(), second.get(1).stats())), + new IngestStats.PipelineStat("pipeline-2", merge(first.get(2).stats(), second.get(0).stats())), + new IngestStats.PipelineStat("pipeline-3", merge(first.get(3).stats(), second.get(3).stats())), + new IngestStats.PipelineStat("pipeline-4", second.get(2).stats()), + new IngestStats.PipelineStat("pipeline-5", first.get(4).stats()) + ) + ); + } + + public void testProcessorStatsMerge() { + { + var first = Map.of("pipeline-1", randomPipelineProcessorStats()); + assertEquals(IngestStats.merge(Map.of(), first), first); + assertEquals(IngestStats.merge(first, Map.of()), first); + } + { + var first = Map.of( + "pipeline-1", + randomPipelineProcessorStats(), + "pipeline-2", + randomPipelineProcessorStats(), + "pipeline-3", + randomPipelineProcessorStats() + ); + var second = Map.of( + "pipeline-2", + randomPipelineProcessorStats(), + "pipeline-3", + randomPipelineProcessorStats(), + "pipeline-1", + randomPipelineProcessorStats() + ); + + assertEquals( + IngestStats.merge(first, second), + Map.of( + "pipeline-1", + expectedPipelineProcessorStats(first.get("pipeline-1"), second.get("pipeline-1")), + "pipeline-2", + expectedPipelineProcessorStats(first.get("pipeline-2"), second.get("pipeline-2")), + "pipeline-3", + expectedPipelineProcessorStats(first.get("pipeline-3"), second.get("pipeline-3")) + ) + ); + } + } + + private static List<IngestStats.ProcessorStat> expectedPipelineProcessorStats( + List<IngestStats.ProcessorStat> first, + List<IngestStats.ProcessorStat> second + ) { + return List.of( + new IngestStats.ProcessorStat("proc-1", "type-1", merge(first.get(0).stats(), second.get(0).stats())), + new IngestStats.ProcessorStat("proc-1", "type-2", merge(first.get(1).stats(), second.get(1).stats())), + new IngestStats.ProcessorStat("proc-2", "type-1", merge(first.get(2).stats(), second.get(2).stats())), + new IngestStats.ProcessorStat("proc-3", "type-4", merge(first.get(3).stats(), second.get(3).stats())) + ); + } + + private static List<IngestStats.ProcessorStat> randomPipelineProcessorStats() { + return List.of( + randomProcessorStat("proc-1", "type-1"), + randomProcessorStat("proc-1", "type-2"), + randomProcessorStat("proc-2", "type-1"), + randomProcessorStat("proc-3", "type-4") + ); + } + + private static IngestStats.Stats merge(IngestStats.Stats... 
stats) { + return Arrays.stream(stats).reduce(IngestStats.Stats.IDENTITY, IngestStats.Stats::merge); + } + private static List<IngestStats.PipelineStat> createPipelineStats() { IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new IngestStats.Stats(3, 3, 3, 3)); IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new IngestStats.Stats(47, 97, 197, 297)); @@ -98,4 +205,16 @@ private static IngestStats.Stats getPipelineStats(List .map(IngestStats.PipelineStat::stats) .orElse(null); } + + private static IngestStats.ProcessorStat randomProcessorStat(String name, String type) { + return new IngestStats.ProcessorStat(name, type, randomStats()); + } + + private static IngestStats.PipelineStat randomPipelineStat(String id) { + return new IngestStats.PipelineStat(id, randomStats()); + } + + private static IngestStats.Stats randomStats() { + return new IngestStats.Stats(randomLong(), randomLong(), randomLong(), randomLong()); + } } diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/MinDocQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/MinDocQueryTests.java index f1e03c42e0487..3fa292acc50e8 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/MinDocQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/MinDocQueryTests.java @@ -37,10 +37,11 @@ public void testBasics() { public void testRewrite() throws Exception { IndexReader reader = new MultiReader(); + IndexSearcher searcher = new IndexSearcher(reader); MinDocQuery query = new MinDocQuery(42); - Query rewritten = query.rewrite(reader); + Query rewritten = query.rewrite(searcher); QueryUtils.checkUnequal(query, rewritten); - Query rewritten2 = rewritten.rewrite(reader); + Query rewritten2 = rewritten.rewrite(searcher); assertSame(rewritten, rewritten2); } diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java index 6fc5e841e4e02..1f9bf525b44b4 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java @@ -33,7 +33,7 @@ public class SpanMatchNoDocsQueryTests extends ESTestCase { public void testSimple() throws Exception { SpanMatchNoDocsQuery query = new SpanMatchNoDocsQuery("field", "a good reason"); assertEquals(query.toString(), "SpanMatchNoDocsQuery(\"a good reason\")"); - Query rewrite = query.rewrite(null); + Query rewrite = query.rewrite((IndexSearcher) null); assertTrue(rewrite instanceof SpanMatchNoDocsQuery); assertEquals(rewrite.toString(), "SpanMatchNoDocsQuery(\"a good reason\")"); } @@ -68,7 +68,7 @@ public void testQuery() throws Exception { assertEquals(searcher.count(orQuery), 1); hits = searcher.search(orQuery, 1000).scoreDocs; assertEquals(1, hits.length); - Query rewrite = orQuery.rewrite(ir); + Query rewrite = orQuery.rewrite(searcher); assertEquals(rewrite, orQuery); SpanNearQuery nearQuery = new SpanNearQuery( @@ -79,7 +79,7 @@ public void testQuery() throws Exception { assertEquals(searcher.count(nearQuery), 0); hits = searcher.search(nearQuery, 1000).scoreDocs; assertEquals(0, hits.length); - rewrite = nearQuery.rewrite(ir); + rewrite = nearQuery.rewrite(searcher); assertEquals(rewrite, nearQuery); iw.close(); diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java 
b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java index 8fc530d45d388..569062317b7b5 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java @@ -892,19 +892,19 @@ public void testInnerDetectCPUModeTransportThreads() throws Exception { assertThat( innerResult, - containsString("0.0% [cpu=0.0%, idle=0.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 1'") + containsString("0.0% [cpu=0.0%, idle=100.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 1'") ); assertThat( innerResult, - containsString("0.0% [cpu=0.0%, idle=0.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 2'") + containsString("0.0% [cpu=0.0%, idle=100.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 2'") ); assertThat( innerResult, - containsString("0.0% [cpu=0.0%, idle=0.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 3'") + containsString("0.0% [cpu=0.0%, idle=100.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 3'") ); assertThat( innerResult, - containsString("0.0% [cpu=0.0%, idle=0.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 4'") + containsString("0.0% [cpu=0.0%, idle=100.0%] (0s out of 10ms) cpu usage by thread '__mock_network_thread 4'") ); // Test with the legacy sort order diff --git a/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java b/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java new file mode 100644 index 0000000000000..5b7f9a926969f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/SnapshotIndexCommitTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class SnapshotIndexCommitTests extends ESTestCase { + + public void testCompleteAndCloseCleanly() throws Exception { + runCompleteTest(false); + } + + public void testCompleteAndFailOnClose() throws Exception { + runCompleteTest(true); + } + + public void testAbortAndCloseCleanly() throws Exception { + runAbortTest(false); + } + + public void testAbortAndFailOnClose() throws Exception { + runAbortTest(true); + } + + private void runCompleteTest(boolean throwOnClose) throws Exception { + final var isClosed = new AtomicBoolean(); + final var indexCommitRef = getSnapshotIndexCommit(throwOnClose, isClosed); + + assertFalse(isClosed.get()); + if (randomBoolean()) { + assertTrue(indexCommitRef.tryIncRef()); + indexCommitRef.decRef(); + } + + assertOnCompletionBehaviour(throwOnClose, indexCommitRef); + + assertTrue(isClosed.get()); + assertFalse(indexCommitRef.tryIncRef()); + + indexCommitRef.onAbort(); + assertFalse(indexCommitRef.tryIncRef()); + } + + private void runAbortTest(boolean throwOnClose) throws Exception { + final var isClosed = new AtomicBoolean(); + final var indexCommitRef = getSnapshotIndexCommit(throwOnClose, isClosed); + + assertFalse(isClosed.get()); + assertTrue(indexCommitRef.tryIncRef()); + + indexCommitRef.onAbort(); + assertFalse(isClosed.get()); + + assertTrue(indexCommitRef.tryIncRef()); + indexCommitRef.decRef(); + indexCommitRef.decRef(); + + assertTrue(isClosed.get()); + + assertOnCompletionBehaviour(throwOnClose, indexCommitRef); + } + + private SnapshotIndexCommit getSnapshotIndexCommit(boolean throwOnClose, AtomicBoolean isClosed) { + return new SnapshotIndexCommit(new Engine.IndexCommitRef(null, () -> { + assertTrue(isClosed.compareAndSet(false, true)); + if (throwOnClose) { + throw new IOException("simulated"); + } + })); + } + + private void assertOnCompletionBehaviour(boolean throwOnClose, SnapshotIndexCommit indexCommitRef) throws Exception { + if (throwOnClose) { + assertEquals("simulated", expectThrows(IOException.class, indexCommitRef::onCompletion).getMessage()); + } else { + indexCommitRef.onCompletion(); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 49bdd0f6c3216..2ba8175068f28 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -113,7 +113,7 @@ public void testRestoreSnapshotWithExistingFiles() throws IOException { null, null, new InternalEngineFactory(), - () -> {}, + NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER ); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java index 7ca505fc3ef9d..b31ed00f8793c 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.store.Store; import 
org.elasticsearch.index.store.StoreFileMetadata; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.SnapshotIndexCommit; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.DummyShardLock; @@ -121,7 +122,7 @@ public static SnapshotShardContext dummyContext(final SnapshotId snapshotId, fin null, snapshotId, indexId, - new Engine.IndexCommitRef(null, () -> {}), + new SnapshotIndexCommit(new Engine.IndexCommitRef(null, () -> {})), null, IndexShardSnapshotStatus.newInitializing(null), Version.CURRENT, diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index 6071de456813d..32431e1870dd2 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.repositories.SnapshotIndexCommit; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.snapshots.Snapshot; @@ -108,7 +109,7 @@ public void testSnapshotAndRestore() throws IOException { null, snapshotId, indexId, - new Engine.IndexCommitRef(indexCommit, () -> {}), + new SnapshotIndexCommit(new Engine.IndexCommitRef(indexCommit, () -> {})), null, snapshotStatus, Version.CURRENT, @@ -151,7 +152,8 @@ public void testSnapshotAndRestore() throws IOException { null, incSnapshotId, indexId, - new Engine.IndexCommitRef(incIndexCommit, () -> {}), + new SnapshotIndexCommit(new Engine.IndexCommitRef(incIndexCommit, () -> {})), null, snapshotStatus2, Version.CURRENT, diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index d2799efef0ec7..c4e9f025f576f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -201,6 +201,17 @@ public void testMalformedContentTypeHeader() { assertThat(e.getMessage(), equalTo("Invalid media-type value on headers [Content-Type]")); } + public void testInvalidMediaTypeCharacter() { + List<String> headers = List.of("a/b[", "a/b]", "a/b\\"); + for (String header : headers) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> RestRequest.parseContentType(Collections.singletonList(header)) + ); + assertThat(e.getMessage(), equalTo("invalid Content-Type header [" + header + "]")); + } + } + public void testNoContentTypeHeader() { RestRequest contentRestRequest = contentRestRequest("", Collections.emptyMap(), Collections.emptyMap()); assertNull(contentRestRequest.getXContentType()); diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java index 137f0c5447611..c0758531988e3 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -17,7 +17,6 @@ import org.apache.lucene.index.DirectoryReader; import 
org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.FilterLeafReader; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; @@ -576,12 +575,12 @@ public String toString(String field) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query queryRewritten = query.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query queryRewritten = query.rewrite(searcher); if (query != queryRewritten) { return new CreateScorerOnceQuery(queryRewritten); } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 163bc65a0a0bc..be1b345ceb6f7 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -50,7 +50,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; @@ -409,14 +408,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.terminateAfter(numDocs); { context.setSize(10); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); QueryPhase.executeQuery(context); assertFalse(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10)); - assertThat(collector.getTotalHits(), equalTo(numDocs)); - context.registerAggsCollectorManager(null); } context.terminateAfter(1); @@ -447,21 +442,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { } { context.setSize(1); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); QueryPhase.executeQuery(context); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); - // TotalHitCountCollector counts num docs in the first leaf - assertThat(collector.getTotalHits(), equalTo(reader.leaves().get(0).reader().numDocs())); - context.registerAggsCollectorManager(null); } { context.setSize(0); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); QueryPhase.executeQuery(context); assertTrue(context.queryResult().terminatedEarly()); // TotalHitCountCollector counts num docs in the first leaf @@ -469,8 +457,6 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocsInFirstLeaf)); 
assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(collector.getTotalHits(), equalTo(numDocsInFirstLeaf)); - context.registerAggsCollectorManager(null); } // tests with trackTotalHits and terminateAfter @@ -478,8 +464,6 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.setSize(0); for (int trackTotalHits : new int[] { -1, 3, 76, 100 }) { context.trackTotalHitsUpTo(trackTotalHits); - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); QueryPhase.executeQuery(context); assertTrue(context.queryResult().terminatedEarly()); if (trackTotalHits == -1) { @@ -493,16 +477,12 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); - assertThat(collector.getTotalHits(), equalTo(countDocUpTo.applyAsInt(10))); - context.registerAggsCollectorManager(null); } context.terminateAfter(7); context.setSize(10); for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) { context.trackTotalHitsUpTo(trackTotalHits); - EarlyTerminatingCollector collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 1, false); - context.registerAggsCollectorManager(new SingleThreadCollectorManager(collector)); QueryPhase.executeQuery(context); assertTrue(context.queryResult().terminatedEarly()); if (trackTotalHits == -1) { @@ -516,7 +496,6 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO)); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7)); - context.registerAggsCollectorManager(null); } reader.close(); dir.close(); @@ -566,16 +545,12 @@ public void testIndexSortingEarlyTermination() throws Exception { assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); context.parsedPostFilter(null); - final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); - context.registerAggsCollectorManager(new SingleThreadCollectorManager(totalHitCountCollector)); QueryPhase.executeQuery(context); assertNull(context.queryResult().terminatedEarly()); assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); - assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs)); - context.registerAggsCollectorManager(null); } { diff --git a/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java index 9eeaf87136bf3..8247494fe7f08 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java @@ -398,12 +398,12 @@ private void waitForMaxActiveGenericThreads(final int nbActive) throws Exception final ThreadPoolStats threadPoolStats = 
clusterService.getClusterApplierService().threadPool().stats(); ThreadPoolStats.Stats generic = null; for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) { - if (ThreadPool.Names.GENERIC.equals(threadPoolStat.getName())) { + if (ThreadPool.Names.GENERIC.equals(threadPoolStat.name())) { generic = threadPoolStat; } } assertThat(generic, notNullValue()); - assertThat(generic.getActive(), equalTo(nbActive)); + assertThat(generic.active(), equalTo(nbActive)); }, 30L, TimeUnit.SECONDS); } diff --git a/server/src/test/java/org/elasticsearch/threadpool/ESThreadPoolTestCase.java b/server/src/test/java/org/elasticsearch/threadpool/ESThreadPoolTestCase.java index ccae5f946d14b..40115e1402495 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ESThreadPoolTestCase.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ESThreadPoolTestCase.java @@ -26,7 +26,7 @@ protected final ThreadPool.Info info(final ThreadPool threadPool, final String n protected final ThreadPoolStats.Stats stats(final ThreadPool threadPool, final String name) { for (final ThreadPoolStats.Stats stats : threadPool.stats()) { - if (name.equals(stats.getName())) { + if (name.equals(stats.name())) { return stats; } } diff --git a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 7b50656150d97..5c355c8009d54 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -72,7 +72,7 @@ public void testRejectedExecutionCounter() throws InterruptedException { block.countDown(); assertThat(counter, equalTo(rejections)); - assertThat(stats(threadPool, threadPoolName).getRejected(), equalTo(rejections)); + assertThat(stats(threadPool, threadPoolName).rejected(), equalTo(rejections)); } finally { terminateThreadPoolIfNeeded(threadPool); } diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index ccc525899b502..9f770c1f34a3d 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -139,8 +139,8 @@ public void testScalingThreadPoolIsBounded() throws InterruptedException { }); } final ThreadPoolStats.Stats stats = stats(threadPool, threadPoolName); - assertThat(stats.getQueue(), equalTo(numberOfTasks - size)); - assertThat(stats.getLargest(), equalTo(size)); + assertThat(stats.queue(), equalTo(numberOfTasks - size)); + assertThat(stats.largest(), equalTo(size)); latch.countDown(); try { taskLatch.await(); @@ -170,7 +170,7 @@ public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws Int } }); } - int threads = stats(threadPool, threadPoolName).getThreads(); + int threads = stats(threadPool, threadPoolName).threads(); assertEquals(128, threads); latch.countDown(); // this while loop is the core of this test; if threads @@ -181,7 +181,7 @@ public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws Int // down do { spinForAtLeastOneMillisecond(); - } while (stats(threadPool, threadPoolName).getThreads() > min); + } while (stats(threadPool, threadPoolName).threads() > min); try { taskLatch.await(); } catch (InterruptedException e) { diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java 
b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java index 194c22fd6b64c..fd9abcdb2134d 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java @@ -10,102 +10,68 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import static org.elasticsearch.threadpool.ThreadPool.THREAD_POOL_TYPES; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.equalTo; public class ThreadPoolStatsTests extends ESTestCase { - public void testThreadPoolStatsSort() throws IOException { - List stats = new ArrayList<>(); - stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L)); - - List copy = new ArrayList<>(stats); + public void testThreadPoolStatsSort() { + var stats = List.of( + new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L), + new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L), + new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L), + new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L), + new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L), + new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L), + new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L) + ); + + var copy = new ArrayList<>(stats); Collections.sort(copy); - List names = new ArrayList<>(copy.size()); - for (ThreadPoolStats.Stats stat : copy) { - names.add(stat.getName()); - } + var names = copy.stream().map(ThreadPoolStats.Stats::name).toList(); assertThat(names, contains("a", "d", "m", "m", "m", "t", "z")); - List threads = new ArrayList<>(copy.size()); - for (ThreadPoolStats.Stats stat : copy) { - threads.add(stat.getThreads()); - } + var threads = copy.stream().map(ThreadPoolStats.Stats::threads).toList(); assertThat(threads, contains(-1, -1, 1, 2, 3, -1, -1)); } - public void testThreadPoolStatsToXContent() throws IOException { - try (BytesStreamOutput os = new BytesStreamOutput()) { - - List stats = new ArrayList<>(); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L)); - - ThreadPoolStats threadPoolStats = new ThreadPoolStats(stats); - try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os)) { - builder.startObject(); - threadPoolStats.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - } - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, os.bytes())) { 
- XContentParser.Token token = parser.currentToken(); - assertNull(token); - - token = parser.nextToken(); - assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + public void testSerialization() throws IOException { + var original = new ThreadPoolStats(randomList(2, ThreadPoolStatsTests::randomStats)); + var other = serialize(original); - token = parser.nextToken(); - assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); - assertThat(parser.currentName(), equalTo(ThreadPoolStats.Fields.THREAD_POOL)); - - token = parser.nextToken(); - assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + assertNotSame(original, other); + assertEquals(original, other); + } - token = parser.nextToken(); - assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + private static ThreadPoolStats serialize(ThreadPoolStats stats) throws IOException { + var out = new BytesStreamOutput(); + stats.writeTo(out); + return new ThreadPoolStats(out.bytes().streamInput()); + } - List names = new ArrayList<>(); - while (token == XContentParser.Token.FIELD_NAME) { - names.add(parser.currentName()); + public static ThreadPoolStats.Stats randomStats() { + return randomStats(randomFrom(THREAD_POOL_TYPES.keySet())); + } - token = parser.nextToken(); - assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + public static ThreadPoolStats.Stats randomStats(String name) { + return new ThreadPoolStats.Stats( + name, + randomMinusOneOrOther(), + randomMinusOneOrOther(), + randomMinusOneOrOther(), + randomMinusOneOrOther(), + randomMinusOneOrOther(), + randomMinusOneOrOther() + ); + } - parser.skipChildren(); - token = parser.nextToken(); - } - assertThat( - names, - contains( - ThreadPool.Names.FORCE_MERGE, - ThreadPool.Names.GENERIC, - ThreadPool.Names.SAME, - ThreadPool.Names.SEARCH, - ThreadPool.Names.WARMER - ) - ); - } - } + private static int randomMinusOneOrOther() { + return randomBoolean() ? 
-1 : randomIntBetween(0, Integer.MAX_VALUE); } } diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java index b9ae3d0a62e91..49c3df17d60dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/ActionTestUtils.java @@ -15,11 +15,12 @@ import org.elasticsearch.client.ResponseListener; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.transport.Transport; -import static org.elasticsearch.action.support.PlainActionFuture.newFuture; -import static org.mockito.Mockito.mock; +import java.util.Map; +import java.util.concurrent.TimeUnit; public class ActionTestUtils { @@ -29,10 +30,11 @@ public static R TransportAction action, Request request ) { - PlainActionFuture future = newFuture(); - Task task = mock(Task.class); - action.execute(task, request, future); - return future.actionGet(); + return PlainActionFuture.get( + future -> action.execute(request.createTask(1L, "direct", action.actionName, TaskId.EMPTY_TASK_ID, Map.of()), request, future), + 10, + TimeUnit.SECONDS + ); } public static Response executeBlockingWithTask( @@ -41,9 +43,11 @@ public static R TransportAction action, Request request ) { - PlainActionFuture future = newFuture(); - taskManager.registerAndExecute("transport", action, request, localConnection, future); - return future.actionGet(); + return PlainActionFuture.get( + future -> taskManager.registerAndExecute("transport", action, request, localConnection, future), + 10, + TimeUnit.SECONDS + ); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 35f0066595f04..0e0f2b8053e46 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; @@ -42,6 +41,7 @@ import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -59,7 +59,6 @@ import static org.elasticsearch.cluster.ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; -import static org.mockito.Mockito.mock; public abstract class ESAllocationTestCase extends ESTestCase { @@ -154,11 +153,13 @@ private static String pickShardsAllocator(Settings settings) { private static DesiredBalanceShardsAllocator 
createDesiredBalanceShardsAllocator(Settings settings) { var queue = new DeterministicTaskQueue(); + var clusterSettings = createBuiltInClusterSettings(settings); + var clusterService = ClusterServiceUtils.createClusterService(queue.getThreadPool(), clusterSettings); return new DesiredBalanceShardsAllocator( - createBuiltInClusterSettings(settings), + clusterSettings, new BalancedShardsAllocator(settings), queue.getThreadPool(), - mock(ClusterService.class), + clusterService, null ) { private RoutingAllocation lastAllocation; diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 9470027a7963e..6b68f13cd1ed0 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -203,7 +203,14 @@ protected class ReplicationGroup implements AutoCloseable, Iterable protected ReplicationGroup(final IndexMetadata indexMetadata) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); - primary = newShard(primaryRouting, indexMetadata, null, getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer); + primary = newShard( + primaryRouting, + indexMetadata, + null, + getEngineFactory(primaryRouting), + NOOP_GCP_SYNCER, + retentionLeaseSyncer + ); replicas = new CopyOnWriteArrayList<>(); this.indexMetadata = indexMetadata; updateAllocationIDsOnPrimary(); @@ -324,7 +331,7 @@ public IndexShard addReplica() throws IOException { indexMetadata, null, getEngineFactory(replicaRouting), - () -> {}, + NOOP_GCP_SYNCER, retentionLeaseSyncer ); addReplica(replica); @@ -363,7 +370,7 @@ public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardP null, null, getEngineFactory(shardRouting), - () -> {}, + NOOP_GCP_SYNCER, retentionLeaseSyncer, EMPTY_EVENT_LISTENER ); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index bbb09c0c545dc..48987a44a1e60 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -73,6 +73,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.repositories.SnapshotIndexCommit; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.elasticsearch.snapshots.Snapshot; @@ -112,6 +113,8 @@ public abstract class IndexShardTestCase extends ESTestCase { public static final IndexEventListener EMPTY_EVENT_LISTENER = new IndexEventListener() { }; + public static final GlobalCheckpointSyncer NOOP_GCP_SYNCER = shardId -> {}; + private static final AtomicBoolean failOnShardFailures = new AtomicBoolean(true); private static final Consumer DEFAULT_SHARD_FAILURE_HANDLER = failure -> { @@ -265,7 +268,7 @@ protected IndexShard newShard( .settings(indexSettings) .primaryTerm(0, primaryTerm) .putMapping("{ \"properties\": {} }"); - return newShard(shardRouting, metadata.build(), null, engineFactory, () -> {}, 
RetentionLeaseSyncer.EMPTY, listeners); + return newShard(shardRouting, metadata.build(), null, engineFactory, NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, listeners); } /** @@ -302,7 +305,7 @@ protected IndexShard newShard( IndexMetadata indexMetadata, @Nullable CheckedFunction readerWrapper ) throws IOException { - return newShard(shardId, primary, nodeId, indexMetadata, readerWrapper, () -> {}); + return newShard(shardId, primary, nodeId, indexMetadata, readerWrapper, NOOP_GCP_SYNCER); } /** @@ -319,7 +322,7 @@ protected IndexShard newShard( String nodeId, IndexMetadata indexMetadata, @Nullable CheckedFunction readerWrapper, - Runnable globalCheckpointSyncer + GlobalCheckpointSyncer globalCheckpointSyncer ) throws IOException { ShardRouting shardRouting = TestShardRouting.newShardRouting( shardId, @@ -353,7 +356,7 @@ protected IndexShard newShard( EngineFactory engineFactory, IndexingOperationListener... listeners ) throws IOException { - return newShard(routing, indexMetadata, indexReaderWrapper, engineFactory, () -> {}, RetentionLeaseSyncer.EMPTY, listeners); + return newShard(routing, indexMetadata, indexReaderWrapper, engineFactory, NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY, listeners); } /** @@ -370,7 +373,7 @@ protected IndexShard newShard( IndexMetadata indexMetadata, @Nullable CheckedFunction indexReaderWrapper, @Nullable EngineFactory engineFactory, - Runnable globalCheckpointSyncer, + GlobalCheckpointSyncer globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexingOperationListener... listeners ) throws IOException { @@ -410,7 +413,7 @@ protected IndexShard newShard( @Nullable CheckedFunction storeProvider, @Nullable CheckedFunction indexReaderWrapper, @Nullable EngineFactory engineFactory, - Runnable globalCheckpointSyncer, + GlobalCheckpointSyncer globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, IndexingOperationListener... 
listeners @@ -449,7 +452,7 @@ protected IndexShard newShard( @Nullable CheckedFunction storeProvider, @Nullable CheckedFunction indexReaderWrapper, @Nullable EngineFactory engineFactory, - Runnable globalCheckpointSyncer, + GlobalCheckpointSyncer globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, LongSupplier relativeTimeSupplier, @@ -1052,7 +1055,7 @@ protected ShardGeneration snapshotShard(final IndexShard shard, final Snapshot s shard.mapperService(), snapshot.getSnapshotId(), indexId, - indexCommitRef, + new SnapshotIndexCommit(indexCommitRef), null, snapshotStatus, Version.CURRENT, diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index bd137473f2b54..eab91bc84715f 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -682,7 +682,7 @@ protected SnapshotInfo getSnapshot(String repository, String snapshot) { protected static ThreadPoolStats.Stats snapshotThreadPoolStats(final String node) { return StreamSupport.stream(internalCluster().getInstance(ThreadPool.class, node).stats().spliterator(), false) - .filter(threadPool -> threadPool.getName().equals(ThreadPool.Names.SNAPSHOT)) + .filter(threadPool -> threadPool.name().equals(ThreadPool.Names.SNAPSHOT)) .findFirst() .orElseThrow(() -> new AssertionError("Failed to find snapshot pool on node [" + node + "]")); } @@ -690,7 +690,7 @@ protected static ThreadPoolStats.Stats snapshotThreadPoolStats(final String node protected void awaitMasterFinishRepoOperations() throws Exception { logger.info("--> waiting for master to finish all repo operations on its SNAPSHOT pool"); final String masterName = internalCluster().getMasterName(); - assertBusy(() -> assertEquals(snapshotThreadPoolStats(masterName).getActive(), 0)); + assertBusy(() -> assertEquals(snapshotThreadPoolStats(masterName).active(), 0)); } protected List createNSnapshots(String repoName, int count) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 8caa2680f5e2a..c98169d3f259c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -386,9 +386,6 @@ public InternalTestCluster( if (Strings.hasLength(System.getProperty("tests.es.logger.level"))) { builder.put("logger.level", System.getProperty("tests.es.logger.level")); } - if (Strings.hasLength(System.getProperty("es.logger.prefix"))) { - builder.put("logger.prefix", System.getProperty("es.logger.prefix")); - } // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index 6e13a9b184a1f..f086230a96012 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -33,8 +33,6 @@ */ public class 
MockLogAppender extends AbstractAppender { - private static final String COMMON_PREFIX = System.getProperty("es.logger.prefix", "org.elasticsearch."); - private final List expectations; public MockLogAppender() { @@ -79,7 +77,7 @@ public abstract static class AbstractEventExpectation implements LoggingExpectat public AbstractEventExpectation(String name, String logger, Level level, String message) { this.name = name; - this.logger = getLoggerName(logger); + this.logger = logger; this.level = level; this.message = message; this.saw = false; @@ -210,13 +208,6 @@ public void assertMatched() { } - private static String getLoggerName(String name) { - if (name.startsWith("org.elasticsearch.")) { - name = name.substring("org.elasticsearch.".length()); - } - return COMMON_PREFIX + name; - } - /** * A wrapper around {@link LoggingExpectation} to detect if the assertMatched method has been called */ @@ -270,4 +261,16 @@ public Releasable capturing(Class... classes) { } }; } + + /** + * Executes an action and verifies expectations against the provided logger + */ + public static void assertThatLogger(Runnable action, Class loggerOwner, MockLogAppender.LoggingExpectation expectation) { + MockLogAppender mockAppender = new MockLogAppender(); + try (var ignored = mockAppender.capturing(loggerOwner)) { + mockAppender.addExpectation(expectation); + action.run(); + mockAppender.assertAllExpectationsMatched(); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index c5205899338da..c43b8e4f10eaa 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.test; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; @@ -73,7 +71,6 @@ public class TestSearchContext extends SearchContext { boolean trackScores = false; int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; RankShardContext rankShardContext; - CollectorManager aggCollectorManager; ContextIndexSearcher searcher; int from; int size; @@ -526,16 +523,6 @@ public Profilers getProfilers() { return null; // no profiling } - @Override - public CollectorManager getAggsCollectorManager() { - return aggCollectorManager; - } - - @Override - public void registerAggsCollectorManager(CollectorManager collector) { - this.aggCollectorManager = collector; - } - @Override public SearchExecutionContext getSearchExecutionContext() { return searchExecutionContext; diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index c834689a0d873..046f92c329c7f 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.docs-test' @@ -30,9 +29,11 @@ restResources { } } -// TODO: Remove the following when RCS feature is released -// The get-builtin-privileges doc test does not include the new cluster privilege for RCS -// So we disable the test if the build is a snapshot where unreleased feature is enabled by default +// TODO: Remove the following when the following features are released. 
The expected output of these tests does not yet include new privileges that are only available under feature flags +// which require snapshot builds: +// * RCS 2.0. cross_cluster_search is only available with untrusted_remote_cluster_feature_flag_registered set +// * DLM. manage_dlm privilege is only available with dlm_feature_flag_enabled set +// We disable these tests for snapshot builds to maintain release build coverage. tasks.named("yamlRestTest").configure { if (BuildParams.isSnapshotBuild()) { systemProperty 'tests.rest.blacklist', '*/get-builtin-privileges/*' diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index a47f03cca1d2d..c0f30bb957cfb 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -61,6 +61,7 @@ without requiring basic authentication: [[security-api-keys]] === API Keys +ifeval::["{release-state}"=="released"] Use the following APIs to create, retrieve and invalidate API keys for access without requiring basic authentication: @@ -72,6 +73,30 @@ without requiring basic authentication: * <> * <> * <> +endif::[] + +ifeval::["{release-state}"!="released"] +Use the following APIs to create and update API keys for access via the REST interface +without requiring basic authentication: + +* <> +* <> +* <> +* <> + +Use the following APIs to create and update cross-cluster API keys for +API key based remote cluster access: + +* <> +* <> + +Use the following APIs to retrieve and invalidate API keys of all types: + +* <> +* <> +* <> +* <> +endif::[] [discrete] [[security-user-apis]] @@ -206,3 +231,7 @@ include::security/get-user-profile.asciidoc[] include::security/suggest-user-profile.asciidoc[] include::security/update-user-profile-data.asciidoc[] include::security/has-privileges-user-profile.asciidoc[] +ifeval::["{release-state}"!="released"] +include::security/create-cross-cluster-api-key.asciidoc[] +include::security/update-cross-cluster-api-key.asciidoc[] +endif::[] diff --git a/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc b/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc new file mode 100644 index 0000000000000..f655346a305e2 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc @@ -0,0 +1,9 @@ +[role="xpack"] +[[security-api-create-cross-cluster-api-key]] +=== Create Cross-Cluster API key API + +++++ +Create Cross-Cluster API key +++++ + +TODO: Placeholder diff --git a/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc b/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc new file mode 100644 index 0000000000000..2653c64069b35 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc @@ -0,0 +1,9 @@ +[role="xpack"] +[[security-api-update-cross-cluster-api-key]] +=== Update Cross-Cluster API key API + +++++ +Update Cross-Cluster API key +++++ + +TODO: Placeholder diff --git a/x-pack/docs/en/security/authorization/privileges.asciidoc b/x-pack/docs/en/security/authorization/privileges.asciidoc index e6dd78b0b07eb..dead9be24bb69 100644 --- a/x-pack/docs/en/security/authorization/privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/privileges.asciidoc @@ -303,6 +303,12 @@ All {Ilm} operations relating to managing the execution of policies of an index or data stream. This includes operations such as retrying policies and removing a policy from an index or data stream.
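Looking back at the MockLogAppender changes earlier in this section: with the es.logger.prefix mangling removed, logger names are now used verbatim, and the new assertThatLogger helper collapses the capture/expect/assert ceremony into one call. A usage sketch under stated assumptions: MyService and its warning message are hypothetical, while SeenEventExpectation is the pre-existing expectation type in MockLogAppender:

```java
// Hypothetical usage of the assertThatLogger helper added to MockLogAppender above.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.test.MockLogAppender;

class MyService { // illustrative service under test, not part of this PR
    private static final Logger logger = LogManager.getLogger(MyService.class);

    void doRiskyThing() {
        logger.warn("something risky happened");
    }
}

class MyServiceLoggingTests {
    void assertWarningIsLogged() {
        MockLogAppender.assertThatLogger(
            new MyService()::doRiskyThing,          // the action to run
            MyService.class,                        // class owning the captured logger
            new MockLogAppender.SeenEventExpectation(
                "risky warning",                    // name shown on assertion failure
                MyService.class.getCanonicalName(), // logger name, now used verbatim
                Level.WARN,
                "*risky*"                           // simple wildcard match on the message
            )
        );
    }
}
```

The helper registers the expectation, runs the action while capturing the class's logger, and asserts that all expectations matched, mirroring the implementation added in the hunk above.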
+ifeval::["{release-state}"!="released"] +`manage_dlm`:: +All {Dlm} operations relating to reading and managing the lifecycle of a data stream. +This includes operations such as adding and removing a lifecycle from a data stream. +endif::[] + `manage_leader_index`:: All actions that are required to manage the lifecycle of a leader index, which includes <>. This diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java index c1e1769cdd8eb..2df3d1a24013c 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java @@ -18,19 +18,20 @@ import java.util.List; import java.util.SortedSet; import java.util.TreeSet; -import java.util.stream.Collectors; /** * Keeps track of the contents of a file that may not be completely present. */ public class SparseFileTracker { + private static final Comparator RANGE_START_COMPARATOR = Comparator.comparingLong(r -> r.start); + /** * The byte ranges of the file which are present or pending. These ranges are nonempty, disjoint (and in order) and the non-pending * ranges are not contiguous (i.e. contiguous non-pending ranges are merged together). See {@link SparseFileTracker#invariant()} for * details. */ - private final TreeSet ranges = new TreeSet<>(Comparator.comparingLong(r -> r.start)); + private final TreeSet ranges = new TreeSet<>(RANGE_START_COMPARATOR); private final Object mutex = new Object(); @@ -167,32 +168,19 @@ public List waitForRange(final ByteRange range, final ByteRange subRange, f } final ActionListener wrappedListener = wrapWithAssertions(listener); - final List requiredRanges; final List gaps = new ArrayList<>(); + final List pendingRanges = new ArrayList<>(); + final Range targetRange = new Range(range); synchronized (mutex) { - assert invariant(); - - final List pendingRanges = new ArrayList<>(); - - final Range targetRange = new Range(range); - final SortedSet earlierRanges = ranges.headSet(targetRange, false); // ranges with strictly earlier starts - if (earlierRanges.isEmpty() == false) { - final Range lastEarlierRange = earlierRanges.last(); - if (range.start() < lastEarlierRange.end) { - if (lastEarlierRange.isPending()) { - pendingRanges.add(lastEarlierRange); - } - targetRange.start = Math.min(range.end(), lastEarlierRange.end); - } - } + determineStartingRange(range, pendingRanges, targetRange); while (targetRange.start < range.end()) { assert 0 <= targetRange.start : targetRange; assert invariant(); - final SortedSet existingRanges = ranges.tailSet(targetRange); - if (existingRanges.isEmpty()) { + final Range firstExistingRange = ranges.ceiling(targetRange); + if (firstExistingRange == null) { final Range newPendingRange = new Range( targetRange.start, range.end(), @@ -203,7 +191,6 @@ public List waitForRange(final ByteRange range, final ByteRange subRange, f gaps.add(new Gap(newPendingRange)); targetRange.start = range.end(); } else { - final Range firstExistingRange = existingRanges.first(); assert targetRange.start <= firstExistingRange.start : targetRange + " vs " + firstExistingRange; if (targetRange.start == firstExistingRange.start) { @@ -232,22 +219,32 @@ public List waitForRange(final ByteRange range, final ByteRange subRange, f assert ranges.containsAll(pendingRanges) : ranges + " vs " + pendingRanges; assert 
pendingRanges.stream().allMatch(Range::isPending) : pendingRanges; assert pendingRanges.size() != 1 || gaps.size() <= 1 : gaps; + } - // Pending ranges that needs to be filled before executing the listener - requiredRanges = range.equals(subRange) - ? pendingRanges - : pendingRanges.stream() - .filter(pendingRange -> pendingRange.start < subRange.end()) - .filter(pendingRange -> subRange.start() < pendingRange.end) - .sorted(Comparator.comparingLong(r -> r.start)) - .collect(Collectors.toList()); + // Pending ranges that need to be filled before executing the listener + if (range.equals(subRange) == false) { + pendingRanges.removeIf(pendingRange -> (pendingRange.start < subRange.end() && subRange.start() < pendingRange.end) == false); + pendingRanges.sort(RANGE_START_COMPARATOR); } - subscribeToCompletionListeners(requiredRanges, subRange.end(), wrappedListener); + subscribeToCompletionListeners(pendingRanges, subRange.end(), wrappedListener); return Collections.unmodifiableList(gaps); } + private void determineStartingRange(ByteRange range, List pendingRanges, Range targetRange) { + assert invariant(); + final Range lastEarlierRange = ranges.lower(targetRange); + if (lastEarlierRange != null) { + if (range.start() < lastEarlierRange.end) { + if (lastEarlierRange.isPending()) { + pendingRanges.add(lastEarlierRange); + } + targetRange.start = Math.min(range.end(), lastEarlierRange.end); + } + } + } + /** * Called before reading a range from the file to ensure that this range is present. Unlike * {@link SparseFileTracker#waitForRange(ByteRange, ByteRange, ActionListener)} this method does not expect the caller to fill in any @@ -268,30 +265,18 @@ public boolean waitForRangeIfPending(final ByteRange range, final ActionListener final ActionListener wrappedListener = wrapWithAssertions(listener); final List pendingRanges = new ArrayList<>(); + final Range targetRange = new Range(range); synchronized (mutex) { - assert invariant(); - - final Range targetRange = new Range(range); - final SortedSet earlierRanges = ranges.headSet(targetRange, false); // ranges with strictly earlier starts - if (earlierRanges.isEmpty() == false) { - final Range lastEarlierRange = earlierRanges.last(); - if (range.start() < lastEarlierRange.end) { - if (lastEarlierRange.isPending()) { - pendingRanges.add(lastEarlierRange); - } - targetRange.start = Math.min(range.end(), lastEarlierRange.end); - } - } + determineStartingRange(range, pendingRanges, targetRange); while (targetRange.start < range.end()) { assert 0 <= targetRange.start : targetRange; assert invariant(); - final SortedSet existingRanges = ranges.tailSet(targetRange); - if (existingRanges.isEmpty()) { + final Range firstExistingRange = ranges.ceiling(targetRange); + if (firstExistingRange == null) { return false; } else { - final Range firstExistingRange = existingRanges.first(); assert targetRange.start <= firstExistingRange.start : targetRange + " vs " + firstExistingRange; if (targetRange.start == firstExistingRange.start) { @@ -362,12 +347,11 @@ public ByteRange getAbsentRangeWithin(ByteRange range) { final long start = range.start(); // Find the first absent byte in the range - final SortedSet startRanges = ranges.headSet(new Range(start, start, null), true); // ranges which start <= 'start' + final Range lastStartRange = ranges.floor(new Range(start, start, null)); long resultStart; - if (startRanges.isEmpty()) { + if (lastStartRange == null) { resultStart = start; } else { - final Range lastStartRange = startRanges.last(); // last range which starts
<= 'start' and which therefore may contain the first byte of the range if (lastStartRange.end < start) { resultStart = start; @@ -381,12 +365,12 @@ public ByteRange getAbsentRangeWithin(ByteRange range) { final long end = range.end(); // Find the last absent byte in the range - final SortedSet endRanges = ranges.headSet(new Range(end, end, null), false); // ranges which start < 'end' + final Range lastEndRange = ranges.lower(new Range(end, end, null)); + final long resultEnd; - if (endRanges.isEmpty()) { + if (lastEndRange == null) { resultEnd = end; } else { - final Range lastEndRange = endRanges.last(); // last range which starts < 'end' and which therefore may contain the last byte of the range if (lastEndRange.end < end) { resultEnd = end; @@ -485,6 +469,7 @@ private void onGapFailure(final Range gapRange, Exception e) { } private boolean invariant() { + assert Thread.holdsLock(mutex); long lengthOfRanges = 0L; Range previousRange = null; for (final Range range : ranges) { diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index c538b064b6e63..d7b390d7f8bb4 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Releasable; @@ -242,8 +241,6 @@ public void validate(ByteSizeValue value, Map, Object> settings, bool private final ConcurrentHashMap, Entry> keyMapping; private final ThreadPool threadPool; - private final KeyedLock keyedLock = new KeyedLock<>(); - private final SharedBytes sharedBytes; private final long cacheSize; private final long regionSize; @@ -380,57 +377,80 @@ private long getRegionSize(long fileLength, int region) { public CacheFileRegion get(KeyType cacheKey, long fileLength, int region) { final long effectiveRegionSize = getRegionSize(fileLength, region); - try (Releasable ignore = keyedLock.acquire(cacheKey)) { - final RegionKey regionKey = new RegionKey<>(cacheKey, region); - final long now = threadPool.relativeTimeInMillis(); - final Entry entry = keyMapping.computeIfAbsent( - regionKey, - key -> new Entry<>(new CacheFileRegion(key, effectiveRegionSize), now) - ); - if (entry.chunk.sharedBytesPos == -1) { - // new item - assert entry.freq == 0; - assert entry.prev == null; - assert entry.next == null; - final Integer freeSlot = freeRegions.poll(); - if (freeSlot != null) { - // no need to evict an item, just add - entry.chunk.sharedBytesPos = freeSlot; - assert regionOwners[freeSlot].compareAndSet(null, entry.chunk); - synchronized (this) { - pushEntryToBack(entry); - } - } else { - // need to evict something - synchronized (this) { - maybeEvict(); + final RegionKey regionKey = new RegionKey<>(cacheKey, region); + final long now = threadPool.relativeTimeInMillis(); + final Entry entry = keyMapping.computeIfAbsent( + regionKey, + key -> new Entry<>(new CacheFileRegion(key, effectiveRegionSize), now) + ); + // sharedBytesPos is volatile, double 
locking is fine, as long as we assign it last. + if (entry.chunk.sharedBytesPos == -1) { + synchronized (entry.chunk) { + if (entry.chunk.sharedBytesPos == -1) { + if (keyMapping.get(regionKey) != entry) { + throw new AlreadyClosedException("no free region found (contender)"); } - final Integer freeSlotRetry = freeRegions.poll(); - if (freeSlotRetry != null) { - entry.chunk.sharedBytesPos = freeSlotRetry; - assert regionOwners[freeSlotRetry].compareAndSet(null, entry.chunk); + // new item + assert entry.freq == 0; + assert entry.prev == null; + assert entry.next == null; + final Integer freeSlot = freeRegions.poll(); + if (freeSlot != null) { + // no need to evict an item, just add + assert regionOwners[freeSlot].compareAndSet(null, entry.chunk); synchronized (this) { pushEntryToBack(entry); + // assign sharedBytesPos only when chunk is ready for use. Under lock to avoid concurrent tryEvict. + entry.chunk.sharedBytesPos = freeSlot; } } else { - boolean removed = keyMapping.remove(regionKey, entry); - assert removed; - throw new AlreadyClosedException("no free region found"); - } - } - } else { - // check if we need to promote item - synchronized (this) { - if (now - entry.lastAccessed >= minTimeDelta && entry.freq + 1 < maxFreq) { - unlink(entry); - entry.freq++; - entry.lastAccessed = now; - pushEntryToBack(entry); + // need to evict something + synchronized (this) { + maybeEvict(); + } + final Integer freeSlotRetry = freeRegions.poll(); + if (freeSlotRetry != null) { + assert regionOwners[freeSlotRetry].compareAndSet(null, entry.chunk); + synchronized (this) { + pushEntryToBack(entry); + // assign sharedBytesPos only when chunk is ready for use. Under lock to avoid concurrent tryEvict. + entry.chunk.sharedBytesPos = freeSlotRetry; + } + } else { + boolean removed = keyMapping.remove(regionKey, entry); + assert removed; + throw new AlreadyClosedException("no free region found"); + } } + + return entry.chunk; } } - return entry.chunk; } + assertChunkActiveOrEvicted(entry); + + // existing item, check if we need to promote item + synchronized (this) { + if (now - entry.lastAccessed >= minTimeDelta && entry.freq + 1 < maxFreq) { + unlink(entry); + entry.freq++; + entry.lastAccessed = now; + pushEntryToBack(entry); + } + } + + return entry.chunk; + } + + private void assertChunkActiveOrEvicted(Entry entry) { + if (Assertions.ENABLED) { + synchronized (this) { + // assert linked (or evicted) + assert entry.prev != null || entry.chunk.isEvicted(); + + } + } + assert regionOwners[entry.chunk.sharedBytesPos].get() == entry.chunk || entry.chunk.isEvicted(); } public void onClose(CacheFileRegion chunk) { @@ -731,7 +751,7 @@ void populateAndRead( ensureOpen(); final SharedBytes.IO fileChannel = sharedBytes.getFileChannel(sharedBytesPos); - resources[0] = Releasables.releaseOnce(fileChannel::decRef); + resources[0] = Releasables.releaseOnce(fileChannel); final ActionListener rangeListener = rangeListener( rangeToRead, diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java index 1594d755f8044..9a69d840788d8 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java @@ -12,9 +12,9 @@ import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteBufferReference; import 
org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -27,7 +27,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Map; import java.util.function.IntConsumer; import java.util.function.LongConsumer; @@ -54,6 +53,9 @@ public class SharedBytes extends AbstractRefCounted { StandardOpenOption.CREATE }; final int numRegions; + + private final IO[] ios; + final long regionSize; // TODO: for systems like Windows without true p-write/read support we should split this up into multiple channels since positional @@ -82,6 +84,10 @@ public class SharedBytes extends AbstractRefCounted { } } this.path = cacheFile; + this.ios = new IO[numRegions]; + for (int i = 0; i < numRegions; i++) { + ios[i] = new IO(i); + } this.writeBytes = writeBytes; this.readBytes = readBytes; } @@ -215,27 +221,11 @@ protected void closeInternal() { } } - private final Map ios = ConcurrentCollections.newConcurrentMap(); - public IO getFileChannel(int sharedBytesPos) { assert fileChannel != null; - return ios.compute(sharedBytesPos, (p, io) -> { - if (io == null || io.tryIncRef() == false) { - final IO newIO; - boolean success = false; - incRef(); - try { - newIO = new IO(p); - success = true; - } finally { - if (success == false) { - decRef(); - } - } - return newIO; - } - return io; - }); + var res = ios[sharedBytesPos]; + incRef(); + return res; } long getPhysicalOffset(long chunkPosition) { @@ -244,13 +234,11 @@ long getPhysicalOffset(long chunkPosition) { return physicalOffset; } - public final class IO extends AbstractRefCounted { + public final class IO implements Releasable { - private final int sharedBytesPos; private final long pageStart; private IO(final int sharedBytesPos) { - this.sharedBytesPos = sharedBytesPos; pageStart = getPhysicalOffset(sharedBytesPos); } @@ -282,9 +270,8 @@ private void checkOffsets(long position, long length) { } @Override - protected void closeInternal() { - ios.remove(sharedBytesPos, this); - SharedBytes.this.decRef(); + public void close() { + decRef(); } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index b836c6812f45c..99a7219270618 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.blobcache.shared; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -24,13 +25,17 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.node.NodeRoleSettings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.HashSet; import java.util.List; import java.util.Set; +import 
java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.equalTo; @@ -212,6 +217,74 @@ public void testDecay() throws IOException { } } + /** + * Exercise SharedBlobCacheService#get in multiple threads to trigger any assertion errors. + * @throws IOException + */ + public void testGetMultiThreaded() throws IOException { + int threads = between(2, 10); + Settings settings = Settings.builder() + .put(NODE_NAME_SETTING.getKey(), "node") + .put( + SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), + ByteSizeValue.ofBytes(size(between(1, 20) * 100L)).getStringRep() + ) + .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep()) + .put("path.home", createTempDir()) + .build(); + long fileLength = size(500); + ThreadPool threadPool = new TestThreadPool("testGetMultiThreaded"); + Set files = randomSet(1, 10, () -> randomAlphaOfLength(5)); + try ( + NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + var cacheService = new SharedBlobCacheService(environment, settings, threadPool) + ) { + CyclicBarrier ready = new CyclicBarrier(threads); + List threadList = IntStream.range(0, threads).mapToObj(no -> { + int iterations = between(100, 500); + String[] cacheKeys = IntStream.range(0, iterations).mapToObj(ignore -> randomFrom(files)).toArray(String[]::new); + int[] regions = IntStream.range(0, iterations).map(ignore -> between(0, 4)).toArray(); + int[] yield = IntStream.range(0, iterations).map(ignore -> between(0, 9)).toArray(); + return new Thread(() -> { + try { + ready.await(); + for (int i = 0; i < iterations; ++i) { + try { + SharedBlobCacheService.CacheFileRegion cacheFileRegion = cacheService.get( + cacheKeys[i], + fileLength, + regions[i] + ); + if (cacheFileRegion.tryIncRef()) { + if (yield[i] == 0) { + Thread.yield(); + } + cacheFileRegion.decRef(); + } + } catch (AlreadyClosedException e) { + // ignore + } + } + } catch (InterruptedException | BrokenBarrierException e) { + assert false; + throw new RuntimeException(e); + } + }); + }).toList(); + threadList.forEach(Thread::start); + threadList.forEach(thread -> { + try { + thread.join(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + }); + } finally { + threadPool.shutdownNow(); + } + } + public void testCacheSizeRejectedOnNonFrozenNodes() { String cacheSize = randomBoolean() ? ByteSizeValue.ofBytes(size(500)).getStringRep() diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java new file mode 100644 index 0000000000000..6d8d202a06445 --- /dev/null +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.blobcache.shared; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; + +import java.nio.file.Files; + +public class SharedBytesTests extends ESTestCase { + + public void testReleasesFileCorrectly() throws Exception { + int regions = randomIntBetween(1, 10); + var nodeSettings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "node") + .put("path.home", createTempDir()) + .putList(Environment.PATH_DATA_SETTING.getKey(), createTempDir().toString()) + .build(); + try (var nodeEnv = new NodeEnvironment(nodeSettings, TestEnvironment.newEnvironment(nodeSettings))) { + final SharedBytes sharedBytes = new SharedBytes( + regions, + randomIntBetween(1, 16) * 4096L, + nodeEnv, + ignored -> {}, + ignored -> {} + ); + final var sharedBytesPath = nodeEnv.nodeDataPaths()[0].resolve("shared_snapshot_cache"); + assertTrue(Files.exists(sharedBytesPath)); + SharedBytes.IO fileChannel = sharedBytes.getFileChannel(randomInt(regions - 1)); + assertTrue(Files.exists(sharedBytesPath)); + if (randomBoolean()) { + fileChannel.close(); + assertTrue(Files.exists(sharedBytesPath)); + sharedBytes.decRef(); + } else { + sharedBytes.decRef(); + assertTrue(Files.exists(sharedBytesPath)); + fileChannel.close(); + } + assertFalse(Files.exists(sharedBytesPath)); + } + } +} diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 93aa30ec68f44..b1ae78328d0ab 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -66,10 +66,13 @@ if (BuildParams.isSnapshotBuild() == false) { // private key, these tests are blacklisted in non-snapshot test runs restTestBlacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*', 'license/30_enterprise_license/*']) - // TODO: Remove the following when RCS feature is released - // cross_cluster_search privilege is only available when untrusted_remote_cluster_feature_flag_registered is enabled - // which requires snapshot build + // TODO: Remove the following when the following features are released. These tests include new privileges only available under feature flags + // which require snapshot builds: + // * RCS 2.0. cross_cluster_search is only available with untrusted_remote_cluster_feature_flag_registered set + // * DLM. manage_dlm privilege is only available with dlm_feature_flag_enabled set + // These tests can therefore only pass on snapshot builds, so we disable them for release builds, where the features are unavailable.
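Circling back to the SharedBlobCacheService#get rewrite above: it replaces a per-key KeyedLock with double-checked locking on the volatile sharedBytesPos field, assigning the slot last so that an unlocked reader never observes a partially initialized region. A generic sketch of that pattern with hypothetical names (Slot stands in for CacheFileRegion):

```java
import java.util.function.IntSupplier;

// Generic double-checked locking sketch; Slot is illustrative, not part of this PR.
final class Slot {

    private volatile int pos = -1; // -1 means "no shared-bytes slot assigned yet"

    int getOrAssign(IntSupplier allocator) {
        int p = pos;                    // first, unlocked read of the volatile field
        if (p == -1) {
            synchronized (this) {
                p = pos;                // second check, under the lock
                if (p == -1) {
                    p = allocator.getAsInt();
                    pos = p;            // assign last: publication happens-after initialization
                }
            }
        }
        return p;
    }
}
```

Because pos is volatile, the unlocked fast path either sees -1 and takes the lock, or sees a fully published slot; it can never observe the slot before the work guarded by the lock has completed.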
restTestBlacklist.add('privileges/11_builtin/Test get builtin privileges') + restTestBlacklist.add('api_key/50_cross_cluster/*') } tasks.named("yamlRestTest").configure { @@ -94,7 +97,10 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> "vectors/10_dense_vector_basic/Deprecated function signature", "to support it, it would require to almost revert back the #48725 and complicate the code" ) - task.skipTest("vectors/20_dense_vector_special_cases/Indexing of Dense vectors should error when dims don't match defined in the mapping", "Error message has changed") + task.skipTest( + "vectors/20_dense_vector_special_cases/Indexing of Dense vectors should error when dims don't match defined in the mapping", + "Error message has changed" + ) task.skipTest("vectors/30_sparse_vector_basic/Cosine Similarity", "not supported for compatibility") task.skipTest("vectors/30_sparse_vector_basic/Deprecated function signature", "not supported for compatibility") task.skipTest("vectors/30_sparse_vector_basic/Dot Product", "not supported for compatibility") diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportActionIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportActionIT.java new file mode 100644 index 0000000000000..7ca5b12462a8e --- /dev/null +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportActionIT.java @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.DataLifecycle; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamAlias; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; + +import static org.elasticsearch.xpack.core.action.XPackUsageFeatureAction.DATA_LIFECYCLE; +import static org.hamcrest.Matchers.equalTo; + +public class DataLifecycleUsageTransportActionIT extends ESIntegTestCase { + /* + * The DataLifecycleUsageTransportAction is not exposed in the xpack core plugin, so we have a special test plugin to do this + */ + @Override + protected Collection> nodePlugins() { + return List.of(TestDateLifecycleUsagePlugin.class); + } + + @After + private void cleanup() throws Exception { + updateClusterState(clusterState -> { + ClusterState.Builder clusterStateBuilder = new ClusterState.Builder(clusterState); + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.dataStreams(Map.of(), Map.of()); + clusterStateBuilder.metadata(metadataBuilder); + return clusterStateBuilder.build(); + }); + updateClusterSettings(Settings.builder().put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), (String) null)); + } + + @SuppressWarnings("unchecked") + public void testAction() throws Exception { + assertUsageResults(0, 0, 0, 0.0, true); + AtomicLong count = new AtomicLong(0); + AtomicLong totalRetentionTimes = new AtomicLong(0); + AtomicLong minRetention = new AtomicLong(Long.MAX_VALUE); + AtomicLong maxRetention = new AtomicLong(Long.MIN_VALUE); + boolean useDefaultRolloverConfig = randomBoolean(); + if (useDefaultRolloverConfig == false) { + updateClusterSettings(Settings.builder().put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), "min_docs=33")); + } + /* + * We now add a number of simulated data streams to the cluster state. Some have lifecycles, some don't. The ones with lifecycles + * have varying retention periods. After adding them, we make sure the numbers add up. 
+        updateClusterState(clusterState -> {
+            Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata());
+            Map<String, DataStream> dataStreamMap = new HashMap<>();
+            for (int dataStreamCount = 0; dataStreamCount < randomInt(200); dataStreamCount++) {
+                boolean hasLifecycle = randomBoolean();
+                long retentionMillis;
+                if (hasLifecycle) {
+                    retentionMillis = randomLongBetween(1000, 100000);
+                    count.incrementAndGet();
+                    totalRetentionTimes.addAndGet(retentionMillis);
+                    if (retentionMillis < minRetention.get()) {
+                        minRetention.set(retentionMillis);
+                    }
+                    if (retentionMillis > maxRetention.get()) {
+                        maxRetention.set(retentionMillis);
+                    }
+                } else {
+                    retentionMillis = 0;
+                }
+                List<Index> indices = new ArrayList<>();
+                for (int indicesCount = 0; indicesCount < randomIntBetween(1, 10); indicesCount++) {
+                    Index index = new Index(randomAlphaOfLength(60), randomAlphaOfLength(60));
+                    indices.add(index);
+                }
+                boolean systemDataStream = randomBoolean();
+                DataStream dataStream = new DataStream(
+                    randomAlphaOfLength(50),
+                    indices,
+                    randomLongBetween(0, 1000),
+                    Map.of(),
+                    systemDataStream || randomBoolean(),
+                    randomBoolean(),
+                    systemDataStream,
+                    randomBoolean(),
+                    IndexMode.STANDARD,
+                    hasLifecycle ? new DataLifecycle(retentionMillis) : null
+                );
+                dataStreamMap.put(dataStream.getName(), dataStream);
+            }
+            Map<String, DataStreamAlias> dataStreamAliasesMap = Map.of();
+            metadataBuilder.dataStreams(dataStreamMap, dataStreamAliasesMap);
+            ClusterState.Builder clusterStateBuilder = new ClusterState.Builder(clusterState);
+            clusterStateBuilder.metadata(metadataBuilder);
+            return clusterStateBuilder.build();
+        });
+        int expectedMinimumRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue();
+        int expectedMaximumRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue();
+        double expectedAverageRetention = count.get() == 0 ? 0.0 : totalRetentionTimes.doubleValue() / count.get();
+        assertUsageResults(
+            count.intValue(),
+            expectedMinimumRetention,
+            expectedMaximumRetention,
+            expectedAverageRetention,
+            useDefaultRolloverConfig
+        );
+    }
+
+    @SuppressWarnings("unchecked")
+    private void assertUsageResults(
+        int count,
+        int minimumRetention,
+        int maximumRetention,
+        double averageRetention,
+        boolean defaultRolloverUsed
+    ) throws Exception {
+        XPackUsageFeatureResponse response = client().execute(DATA_LIFECYCLE, new XPackUsageRequest()).get();
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        builder = response.getUsage().toXContent(builder, ToXContent.EMPTY_PARAMS);
+        Tuple<XContentType, Map<String, Object>> tuple = XContentHelper.convertToMap(
+            BytesReference.bytes(builder),
+            true,
+            XContentType.JSON
+        );
+
+        Map<String, Object> map = tuple.v2();
+        assertThat(map.get("available"), equalTo(true));
+        assertThat(map.get("enabled"), equalTo(true));
+        assertThat(map.get("count"), equalTo(count));
+        assertThat(map.get("default_rollover_used"), equalTo(defaultRolloverUsed));
+        Map<String, Object> retentionMap = (Map<String, Object>) map.get("retention");
+        assertThat(retentionMap.size(), equalTo(3));
+        assertThat(retentionMap.get("minimum_millis"), equalTo(minimumRetention));
+        assertThat(retentionMap.get("maximum_millis"), equalTo(maximumRetention));
+        assertThat(retentionMap.get("average_millis"), equalTo(averageRetention));
+    }
+
+    /*
+     * Updates the cluster state in the internal cluster using the provided function
+     */
+    protected static void updateClusterState(final Function<ClusterState, ClusterState> updater) throws Exception {
+        final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
+        final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
+        clusterService.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask() {
+            @Override
+            public ClusterState execute(ClusterState currentState) {
+                return updater.apply(currentState);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                future.onFailure(e);
+            }
+
+            @Override
+            public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
+                future.onResponse(null);
+            }
+        });
+        future.get();
+    }
+
+    /*
+     * This plugin exposes the DataLifecycleUsageTransportAction.
+     */
+    public static final class TestDateLifecycleUsagePlugin extends XPackClientPlugin {
+        @Override
+        public List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+            List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>();
+            actions.add(new ActionPlugin.ActionHandler<>(DATA_LIFECYCLE, DataLifecycleUsageTransportAction.class));
+            return actions;
+        }
+    }
+}
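(Reviewer note, not part of the patch: assertUsageResults above verifies the usage XContent key by key. For orientation, the payload it asserts has roughly this shape; the values are illustrative only:

    // {
    //   "available": true,
    //   "enabled": true,
    //   "count": 7,
    //   "default_rollover_used": true,
    //   "retention": {
    //     "minimum_millis": 1000,
    //     "maximum_millis": 99000,
    //     "average_millis": 50000.0
    //   }
    // }
)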
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java
index c0272fe912375..de5b5e4d825a2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotRepository.java
@@ -39,6 +39,7 @@
 import org.elasticsearch.repositories.FinalizeSnapshotContext;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.SnapshotIndexCommit;
 import org.elasticsearch.repositories.SnapshotShardContext;
 
 import java.io.Closeable;
@@ -194,8 +195,11 @@ protected void closeInternal() {
                 SegmentInfos segmentInfos = tempStore.readLastCommittedSegmentsInfo();
                 final long maxDoc = segmentInfos.totalMaxDoc();
                 tempStore.bootstrapNewHistory(maxDoc, maxDoc);
-                store.incRef();
-                toClose.add(store::decRef);
+                try (var ignored = context.withCommitRef()) {
+                    // obtain the commit ref first, ensuring the store is still open here, so that aborted snapshots are reported properly
+                    store.incRef();
+                    toClose.add(store::decRef);
+                }
                 DirectoryReader reader = DirectoryReader.open(tempStore.directory());
                 toClose.add(reader);
                 IndexCommit indexCommit = reader.getIndexCommit();
@@ -205,7 +209,7 @@ protected void closeInternal() {
                         mapperService,
                         context.snapshotId(),
                         context.indexId(),
-                        new Engine.IndexCommitRef(indexCommit, () -> IOUtils.close(toClose)),
+                        new SnapshotIndexCommit(new Engine.IndexCommitRef(indexCommit, () -> IOUtils.close(toClose))),
                         context.stateIdentifier(),
                         context.status(),
                         context.getRepositoryMetaVersion(),
@@ -213,7 +217,7 @@ protected void closeInternal() {
                         context
                     )
                 );
-            } catch (IOException e) {
+            } catch (Exception e) {
                 try {
                     IOUtils.close(toClose);
                 } catch (IOException ex) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
index f18eaa4675c02..a6cbc37c89f4d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.NamedDiff;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Setting;
@@ -38,6 +39,7 @@
 import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage;
 import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction;
 import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
+import org.elasticsearch.xpack.core.datastreams.DataLifecycleFeatureSetUsage;
 import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage;
 import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
 import org.elasticsearch.xpack.core.enrich.EnrichFeatureSetUsage;
@@ -234,6 +236,8 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Objects;
+import java.util.stream.Stream;
 
 // TODO: merge this into XPackPlugin
 public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin {
@@ -414,7 +418,7 @@ public List<ActionType<? extends ActionResponse>> getClientActions() {
     @Override
     public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
-        return Arrays.asList(
+        return Stream.of(
             // graph
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new),
             // logstash
@@ -545,6 +549,13 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
             ),
             // Data Streams
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new),
+            DataLifecycle.isEnabled()
+                ? new NamedWriteableRegistry.Entry(
+                    XPackFeatureSet.Usage.class,
+                    XPackField.DATA_LIFECYCLE,
+                    DataLifecycleFeatureSetUsage::new
+                )
+                : null,
             // Data Tiers
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new),
             // Archive
@@ -561,7 +572,7 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
                 XPackField.ENTERPRISE_SEARCH,
                 EnterpriseSearchFeatureSetUsage::new
             )
-        );
+        ).filter(Objects::nonNull).toList();
     }
 
     @Override
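(Reviewer note, not part of the patch: the getNamedWriteables() change above swaps Arrays.asList for Stream.of so that a feature-flagged entry can be written inline as a ternary and silently dropped when the flag is off. A minimal, self-contained sketch of the pattern, with hypothetical entry names:

    import java.util.List;
    import java.util.Objects;
    import java.util.stream.Stream;

    class ConditionalEntriesDemo {
        static List<String> entries(boolean featureEnabled) {
            return Stream.of(
                "always-present-entry",
                featureEnabled ? "feature-gated-entry" : null // null when the feature is disabled
            ).filter(Objects::nonNull).toList();              // null entries vanish here
        }
    }

This keeps the registry list declarative while still allowing per-feature gating.)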
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
index 94e438fa77b2a..dd20ec3c9cd5b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
@@ -65,6 +65,8 @@ public final class XPackField {
     public static final String SEARCHABLE_SNAPSHOTS = "searchable_snapshots";
     /** Name constant for the data streams feature. */
     public static final String DATA_STREAMS = "data_streams";
+    /** Name constant for the data lifecycle feature. */
+    public static final String DATA_LIFECYCLE = "data_lifecycle";
     /** Name constant for the data tiers feature. */
     public static final String DATA_TIERS = "data_tiers";
     /** Name constant for the aggregate_metric plugin. */
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index da2ae5afe129d..f3130ed3d0142 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.action.support.TransportAction;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -74,6 +75,7 @@
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider;
 import org.elasticsearch.xpack.cluster.routing.allocation.mapper.DataTierFieldMapper;
+import org.elasticsearch.xpack.core.action.DataLifecycleUsageTransportAction;
 import org.elasticsearch.xpack.core.action.DataStreamInfoTransportAction;
 import org.elasticsearch.xpack.core.action.DataStreamUsageTransportAction;
 import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
@@ -355,6 +357,9 @@ public Collection<Object> createComponents(
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_TIERS, DataTiersUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_STREAMS, DataStreamUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackInfoFeatureAction.DATA_STREAMS, DataStreamInfoTransportAction.class));
+        if (DataLifecycle.isEnabled()) {
+            actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_LIFECYCLE, DataLifecycleUsageTransportAction.class));
+        }
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.HEALTH, HealthApiUsageTransportAction.class));
         actions.add(new ActionHandler<>(XPackUsageFeatureAction.REMOTE_CLUSTERS, RemoteClusterUsageTransportAction.class));
         return actions;
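(Reviewer note, not part of the patch: the registration above is gated on DataLifecycle.isEnabled(). Assuming that check is backed by a simple system-property feature flag, which is a common pattern for incubating features, the shape would be roughly the following; the property name and the real mechanism in the codebase are assumptions:

    final class DataLifecycleFlagSketch {
        // Hypothetical property name, for illustration only.
        private static final boolean ENABLED =
            Boolean.parseBoolean(System.getProperty("es.dlm_feature_flag_enabled", "false"));

        static boolean isEnabled() {
            return ENABLED;
        }
    }
)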
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportAction.java
new file mode 100644
index 0000000000000..7b0babc73a724
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DataLifecycleUsageTransportAction.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.protocol.xpack.XPackUsageRequest;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.datastreams.DataLifecycleFeatureSetUsage;
+
+import java.util.Collection;
+import java.util.LongSummaryStatistics;
+import java.util.stream.Collectors;
+
+public class DataLifecycleUsageTransportAction extends XPackUsageFeatureTransportAction {
+
+    @Inject
+    public DataLifecycleUsageTransportAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver
+    ) {
+        super(
+            XPackUsageFeatureAction.DATA_LIFECYCLE.name(),
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            indexNameExpressionResolver
+        );
+    }
+
+    @Override
+    protected void masterOperation(
+        Task task,
+        XPackUsageRequest request,
+        ClusterState state,
+        ActionListener<XPackUsageFeatureResponse> listener
+    ) {
+        final Collection<DataStream> dataStreams = state.metadata().dataStreams().values();
+        LongSummaryStatistics retentionStats = dataStreams.stream()
+            .filter(ds -> ds.getLifecycle() != null)
+            .collect(Collectors.summarizingLong(ds -> ds.getLifecycle().getDataRetention().getMillis()));
+        long dataStreamsWithLifecycles = retentionStats.getCount();
+        long minRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMin();
+        long maxRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMax();
+        double averageRetention = retentionStats.getAverage();
+        RolloverConfiguration rolloverConfiguration = clusterService.getClusterSettings()
+            .get(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING);
+        String rolloverConfigString = rolloverConfiguration.toString();
+        final DataLifecycleFeatureSetUsage.LifecycleStats stats = new DataLifecycleFeatureSetUsage.LifecycleStats(
+            dataStreamsWithLifecycles,
+            minRetention,
+            maxRetention,
+            averageRetention,
+            DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getDefault(null).toString().equals(rolloverConfigString)
+        );
+
+        final DataLifecycleFeatureSetUsage usage = new DataLifecycleFeatureSetUsage(stats);
+        listener.onResponse(new XPackUsageFeatureResponse(usage));
+    }
+}
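(Reviewer note, not part of the patch: masterOperation computes count, min, max and average in a single pass with Collectors.summarizingLong. A self-contained illustration, including the empty-stream edge case that motivates the dataStreamsWithLifecycles == 0 guards above:

    import java.util.List;
    import java.util.LongSummaryStatistics;
    import java.util.stream.Collectors;

    class RetentionStatsDemo {
        public static void main(String[] args) {
            LongSummaryStatistics stats = List.of(1000L, 5000L, 9000L).stream()
                .collect(Collectors.summarizingLong(Long::longValue));
            // Prints: count=3 min=1000 max=9000 average=5000.0
            System.out.println("count=" + stats.getCount() + " min=" + stats.getMin()
                + " max=" + stats.getMax() + " average=" + stats.getAverage());

            LongSummaryStatistics empty = List.<Long>of().stream()
                .collect(Collectors.summarizingLong(Long::longValue));
            // On an empty stream, min is Long.MAX_VALUE and max is Long.MIN_VALUE,
            // which is exactly why the action falls back to 0 when the count is 0.
            System.out.println(empty.getMin() + " " + empty.getMax() + " " + empty.getAverage());
        }
    }
)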
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java
index bb9c25f5cb6f2..8ddcdfd2e9e24 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.action;
 
 import org.elasticsearch.action.ActionType;
+import org.elasticsearch.cluster.metadata.DataLifecycle;
 import org.elasticsearch.transport.TcpTransport;
 import org.elasticsearch.xpack.core.XPackField;
@@ -45,6 +46,7 @@ public class XPackUsageFeatureAction extends ActionType<XPackUsageFeatureResponse>
shutdownMetadataMap.get(idShardsShouldBeOn))
-            .map(singleNodeShutdown -> switch (singleNodeShutdown.getType()) {
-                case REMOVE:
-                case SIGTERM:
-                case REPLACE:
-                    yield true;
-                case RESTART:
-                    yield false;
-            })
-            .orElse(false);
+        var shutdown = clusterState.metadata().nodeShutdowns().get(idShardsShouldBeOn);
+        boolean nodeBeingRemoved = shutdown != null && shutdown.getType() != SingleNodeShutdownMetadata.Type.RESTART;
         final IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index);
         int foundShards = 0;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
index 046b2e3c951c2..208c47e1dcedb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
@@ -237,7 +237,8 @@ public RoleDescriptor roleDescriptor() {
             runAs,
             metadata,
             Collections.emptyMap(),
-            remoteIndicesPrivileges.toArray(new RoleDescriptor.RemoteIndicesPrivileges[0])
+            remoteIndicesPrivileges.toArray(new RoleDescriptor.RemoteIndicesPrivileges[0]),
+            null
         );
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java
index 765b12de186e4..2ce1778941270 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java
@@ -38,7 +38,7 @@ public PutRoleRequestBuilder(ElasticsearchClient client, PutRoleAction action) {
     public PutRoleRequestBuilder source(String name, BytesReference source, XContentType xContentType) throws
IOException {
         // we pass false as last parameter because we want to reject the request if field permissions
         // are given in 2.x syntax
-        RoleDescriptor descriptor = RoleDescriptor.parse(name, source, false, xContentType);
+        RoleDescriptor descriptor = RoleDescriptor.parse(name, source, false, xContentType, false);
         assert name.equals(descriptor.getName());
         request.name(name);
         request.cluster(descriptor.getClusterPrivileges());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java
index 499640ec215b4..c9a3eb0a6e416 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/RoleDescriptorRequestValidator.java
@@ -85,6 +85,9 @@ public static ActionRequestValidationException validate(
                 validationException
             );
         }
+        if (roleDescriptor.hasWorkflowsRestriction()) {
+            // TODO: Validate workflow names here!
+        }
         return validationException;
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
index e61f2594ffe00..0de60e830433d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
@@ -8,6 +8,7 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.TransportVersion;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -55,6 +56,8 @@
  */
 public class RoleDescriptor implements ToXContentObject, Writeable {
 
+    public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersion.V_8_500_005;
+
     public static final String ROLE_TYPE = "role";
 
     private final String name;
@@ -64,6 +67,7 @@ public class RoleDescriptor implements ToXContentObject, Writeable {
     private final ApplicationResourcePrivileges[] applicationPrivileges;
     private final String[] runAs;
     private final RemoteIndicesPrivileges[] remoteIndicesPrivileges;
+    private final Restriction restriction;
     private final Map<String, Object> metadata;
     private final Map<String, Object> transientMetadata;
 
@@ -89,7 +93,7 @@ public RoleDescriptor(
     /**
      * @deprecated Use {@link #RoleDescriptor(String, String[], IndicesPrivileges[], ApplicationResourcePrivileges[],
-     * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[])}
+     * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], Restriction)}
      */
     @Deprecated
     public RoleDescriptor(
@@ -104,7 +108,7 @@ public RoleDescriptor(
     /**
      * @deprecated Use {@link #RoleDescriptor(String, String[], IndicesPrivileges[], ApplicationResourcePrivileges[],
-     * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[])}
+     * ConfigurableClusterPrivilege[], String[], Map, Map, RemoteIndicesPrivileges[], Restriction)}
      */
     @Deprecated
     public RoleDescriptor(
@@ -115,7 +119,18 @@ public RoleDescriptor(
         @Nullable Map<String, Object> metadata,
         @Nullable Map<String, Object> transientMetadata
     ) {
-        this(name, clusterPrivileges, indicesPrivileges, null, null, runAs, metadata, transientMetadata, RemoteIndicesPrivileges.NONE);
+        this(
+            name,
+            clusterPrivileges,
+            indicesPrivileges,
+            null,
+            null,
+            runAs,
+            metadata,
+            transientMetadata,
+            RemoteIndicesPrivileges.NONE,
+            Restriction.NONE
+        );
     }
 
     public RoleDescriptor(
@@ -137,7 +152,8 @@ public RoleDescriptor(
             runAs,
             metadata,
             transientMetadata,
-            RemoteIndicesPrivileges.NONE
+            RemoteIndicesPrivileges.NONE,
+            Restriction.NONE
         );
     }
 
@@ -150,7 +166,8 @@ public RoleDescriptor(
         @Nullable String[] runAs,
         @Nullable Map<String, Object> metadata,
         @Nullable Map<String, Object> transientMetadata,
-        @Nullable RemoteIndicesPrivileges[] remoteIndicesPrivileges
+        @Nullable RemoteIndicesPrivileges[] remoteIndicesPrivileges,
+        @Nullable Restriction restriction
     ) {
         this.name = name;
         this.clusterPrivileges = clusterPrivileges != null ? clusterPrivileges : Strings.EMPTY_ARRAY;
@@ -163,6 +180,7 @@ public RoleDescriptor(
             ? Collections.unmodifiableMap(transientMetadata)
             : Collections.singletonMap("enabled", true);
         this.remoteIndicesPrivileges = remoteIndicesPrivileges != null ? remoteIndicesPrivileges : RemoteIndicesPrivileges.NONE;
+        this.restriction = restriction != null ? restriction : Restriction.NONE;
     }
 
     public RoleDescriptor(StreamInput in) throws IOException {
@@ -184,6 +202,11 @@ public RoleDescriptor(StreamInput in) throws IOException {
         } else {
             this.remoteIndicesPrivileges = RemoteIndicesPrivileges.NONE;
         }
+        if (in.getTransportVersion().onOrAfter(WORKFLOWS_RESTRICTION_VERSION)) {
+            this.restriction = new Restriction(in);
+        } else {
+            this.restriction = Restriction.NONE;
+        }
     }
 
     public String getName() {
@@ -235,13 +258,26 @@ public boolean hasPrivilegesOtherThanIndex() {
             || hasConfigurableClusterPrivileges()
             || hasApplicationPrivileges()
             || hasRunAs()
-            || hasRemoteIndicesPrivileges();
+            || hasRemoteIndicesPrivileges()
+            || hasWorkflowsRestriction();
     }
 
     public String[] getRunAs() {
         return this.runAs;
     }
 
+    public Restriction getRestriction() {
+        return restriction;
+    }
+
+    public boolean hasRestriction() {
+        return restriction != null && false == restriction.isEmpty();
+    }
+
+    public boolean hasWorkflowsRestriction() {
+        return hasRestriction() && restriction.hasWorkflows();
+    }
+
     public Map<String, Object> getMetadata() {
         return metadata;
     }
@@ -276,7 +312,7 @@ public String toString() {
         for (RemoteIndicesPrivileges group : remoteIndicesPrivileges) {
             sb.append(group.toString()).append(",");
         }
-        sb.append("]");
+        sb.append("], restriction=").append(restriction);
         sb.append("]");
         return sb.toString();
     }
@@ -295,7 +331,8 @@ public boolean equals(Object o) {
         if (Arrays.equals(applicationPrivileges, that.applicationPrivileges) == false) return false;
         if (metadata.equals(that.getMetadata()) == false) return false;
         if (Arrays.equals(runAs, that.runAs) == false) return false;
-        return Arrays.equals(remoteIndicesPrivileges, that.remoteIndicesPrivileges);
+        if (Arrays.equals(remoteIndicesPrivileges, that.remoteIndicesPrivileges) == false) return false;
+        return restriction.equals(that.restriction);
     }
 
     @Override
@@ -308,6 +345,7 @@ public int hashCode() {
         result = 31 * result + Arrays.hashCode(runAs);
         result = 31 * result + metadata.hashCode();
         result = 31 * result + Arrays.hashCode(remoteIndicesPrivileges);
+        result = 31 * result + restriction.hashCode();
         return result;
     }
 
@@ -318,7 +356,8 @@ public boolean isEmpty() {
         && applicationPrivileges.length == 0
         && runAs.length == 0
         && metadata.size() == 0
-        && remoteIndicesPrivileges.length == 0;
+        && remoteIndicesPrivileges.length == 0
+        && restriction.isEmpty();
     }
 
     @Override
@@ -358,6 +397,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, boolea
         if (hasRemoteIndicesPrivileges()) {
             builder.xContentList(Fields.REMOTE_INDICES.getPreferredName(), remoteIndicesPrivileges);
         }
+        if (hasRestriction()) {
+            builder.field(Fields.RESTRICTION.getPreferredName(), restriction);
+        }
         return builder.endObject();
     }
 
@@ -377,10 +419,23 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY_CCS)) {
             out.writeArray(remoteIndicesPrivileges);
         }
+        if (out.getTransportVersion().onOrAfter(WORKFLOWS_RESTRICTION_VERSION)) {
+            restriction.writeTo(out);
+        }
     }
 
     public static RoleDescriptor parse(String name, BytesReference source, boolean allow2xFormat, XContentType xContentType)
         throws IOException {
+        return parse(name, source, allow2xFormat, xContentType, true);
+    }
+
+    public static RoleDescriptor parse(
+        String name,
+        BytesReference source,
+        boolean allow2xFormat,
+        XContentType xContentType,
+        boolean allowRestriction
+    ) throws IOException {
         assert name != null;
         // EMPTY is safe here because we never use namedObject
         try (
@@ -388,16 +443,26 @@ public static RoleDescriptor parse(String name, BytesReference source, boolean a
             XContentParser parser = xContentType.xContent()
                 .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)
         ) {
-            return parse(name, parser, allow2xFormat);
+            return parse(name, parser, allow2xFormat, allowRestriction);
         }
     }
 
     public static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xFormat) throws IOException {
-        return parse(name, parser, allow2xFormat, TcpTransport.isUntrustedRemoteClusterEnabled());
+        return parse(name, parser, allow2xFormat, TcpTransport.isUntrustedRemoteClusterEnabled(), true);
     }
 
-    static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xFormat, boolean untrustedRemoteClusterEnabled)
+    public static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xFormat, boolean allowRestriction)
         throws IOException {
+        return parse(name, parser, allow2xFormat, TcpTransport.isUntrustedRemoteClusterEnabled(), allowRestriction);
+    }
+
+    static RoleDescriptor parse(
+        String name,
+        XContentParser parser,
+        boolean allow2xFormat,
+        boolean untrustedRemoteClusterEnabled,
+        boolean allowRestriction
+    ) throws IOException {
         // validate name
         Validation.Error validationError = Validation.Roles.validateRoleName(name, true);
         if (validationError != null) {
@@ -418,6 +483,7 @@ static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xF
         List<ConfigurableClusterPrivilege> configurableClusterPrivileges = Collections.emptyList();
         ApplicationResourcePrivileges[] applicationPrivileges = null;
         String[] runAsUsers = null;
+        Restriction restriction = null;
         Map<String, Object> metadata = null;
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
@@ -457,6 +523,8 @@ static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xF
             } else if (untrustedRemoteClusterEnabled
                 && Fields.REMOTE_INDICES.match(currentFieldName, parser.getDeprecationHandler())) {
                     remoteIndicesPrivileges = parseRemoteIndices(name, parser);
+            } else if (allowRestriction && Fields.RESTRICTION.match(currentFieldName, parser.getDeprecationHandler())) {
+                restriction = Restriction.parse(name, parser);
             } else if (Fields.TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
                 // don't need it
             } else {
@@ -476,7 +544,8 @@ static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xF
             runAsUsers,
             metadata,
             null,
-            remoteIndicesPrivileges
+            remoteIndicesPrivileges,
+            restriction
         );
     }
 
@@ -1513,8 +1582,115 @@ public ApplicationResourcePrivileges build() {
             }
             return applicationPrivileges;
         }
+    }
+
+    public static class Restriction implements Writeable, ToXContentObject {
+
+        public static final Restriction NONE = new Restriction((String[]) null);
+
+        private final String[] workflows;
+
+        public Restriction(String[] workflows) {
+            assert workflows == null || workflows.length > 0 : "workflows cannot be an empty array";
+            this.workflows = workflows;
+        }
+
+        public Restriction(StreamInput in) throws IOException {
+            this(in.readOptionalStringArray());
+        }
+
+        public boolean hasWorkflows() {
+            return workflows != null && workflows.length > 0;
+        }
+
+        public String[] getWorkflows() {
+            return workflows;
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.array(Fields.WORKFLOWS.getPreferredName(), workflows);
+            return builder.endObject();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeOptionalStringArray(workflows);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Restriction that = (Restriction) o;
+            return Arrays.equals(workflows, that.workflows);
+        }
+
+        @Override
+        public int hashCode() {
+            return Arrays.hashCode(workflows);
+        }
+
+        public boolean isEmpty() {
+            return workflows == null || workflows.length == 0;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder sb = new StringBuilder(getClass().getSimpleName()).append("[workflows=[")
+                .append(Strings.arrayToCommaDelimitedString(workflows))
+                .append("]]");
+            return sb.toString();
+        }
+
+        static Restriction parse(String roleName, XContentParser parser) throws IOException {
+            // advance to the START_OBJECT token if needed
+            XContentParser.Token token = parser.currentToken() == null ? parser.nextToken() : parser.currentToken();
+            if (token != XContentParser.Token.START_OBJECT) {
+                throw new ElasticsearchParseException(
+                    "failed to parse restriction for role [{}]. "
+                        + "expected field [{}] value to be an object, but found an element of type [{}]",
+                    roleName,
+                    parser.currentName(),
+                    token
+                );
+            }
+            String currentFieldName = null;
+            String[] workflows = null;
+            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                if (token == XContentParser.Token.FIELD_NAME) {
+                    currentFieldName = parser.currentName();
+                } else if (Fields.WORKFLOWS.match(currentFieldName, parser.getDeprecationHandler())) {
+                    workflows = readWorkflowsStringArray(roleName, parser);
+                } else {
+                    throw new ElasticsearchParseException(
+                        "failed to parse restriction for role [{}]. unexpected field [{}]",
+                        roleName,
+                        currentFieldName
+                    );
+                }
+            }
+            if (workflows != null && workflows.length <= 0) {
+                throw new ElasticsearchParseException(
+                    "failed to parse restriction for role [{}]. [{}] cannot be an empty array",
+                    roleName,
+                    Fields.WORKFLOWS
+                );
+            }
+            return new Restriction(workflows);
+        }
+
+        private static String[] readWorkflowsStringArray(String roleName, XContentParser parser) throws IOException {
+            try {
+                return XContentUtils.readStringArray(parser, false);
+            } catch (ElasticsearchParseException e) {
+                // re-wrap in order to add the role name
+                throw new ElasticsearchParseException("failed to parse restriction for role [{}]. {}", e, roleName, e.getMessage());
+            }
+        }
+    }
 
     public interface Fields {
@@ -1539,5 +1715,7 @@ public interface Fields {
         ParseField METADATA = new ParseField("metadata");
         ParseField TRANSIENT_METADATA = new ParseField("transient_metadata");
         ParseField TYPE = new ParseField("type");
+        ParseField RESTRICTION = new ParseField("restriction");
+        ParseField WORKFLOWS = new ParseField("workflows");
     }
 }
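(Reviewer note, not part of the patch: the StreamInput constructor and writeTo changes above follow the standard transport-version gating pattern: writer and reader must gate on the same version constant, otherwise every field after the gated one is misread. A self-contained sketch; Wire is a stand-in for StreamInput/StreamOutput:

    import java.util.ArrayDeque;
    import java.util.Deque;

    class VersionGatingDemo {
        static final int NEW_FIELD_VERSION = 5;

        record Wire(int version, Deque<Object> buf) {
            Wire(int version) { this(version, new ArrayDeque<>()); }
            void write(Object o) { buf.addLast(o); }
            Object read() { return buf.removeFirst(); }
        }

        static void write(Wire out, String oldField, String newField) {
            out.write(oldField);
            if (out.version() >= NEW_FIELD_VERSION) {
                out.write(newField); // only sent to peers that understand it
            }
        }

        static String[] read(Wire in) {
            String oldField = (String) in.read();
            // Must mirror write() exactly; older peers fall back to a sentinel,
            // just as RoleDescriptor falls back to Restriction.NONE.
            String newField = in.version() >= NEW_FIELD_VERSION ? (String) in.read() : null;
            return new String[] { oldField, newField };
        }
    }
)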
{}", e, roleName, e.getMessage()); + } + } + } public interface Fields { @@ -1539,5 +1715,7 @@ public interface Fields { ParseField METADATA = new ParseField("metadata"); ParseField TRANSIENT_METADATA = new ParseField("transient_metadata"); ParseField TYPE = new ParseField("type"); + ParseField RESTRICTION = new ParseField("restriction"); + ParseField WORKFLOWS = new ParseField("workflows"); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 0cf436e12da55..f20c112091737 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.datastreams.PromoteDataStreamAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.search.SearchShardsAction; +import org.elasticsearch.cluster.metadata.DataLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.transport.TcpTransport; @@ -118,6 +119,8 @@ public final class IndexPrivilege extends Privilege { ValidateQueryAction.NAME + "*", GetSettingsAction.NAME, ExplainLifecycleAction.NAME, + "indices:admin/dlm/get", + "indices:admin/dlm/explain", GetDataStreamAction.NAME, ResolveIndexAction.NAME, FieldCapabilitiesAction.NAME + "*", @@ -133,6 +136,7 @@ public final class IndexPrivilege extends Privilege { ); private static final Automaton MANAGE_LEADER_INDEX_AUTOMATON = patterns(ForgetFollowerAction.NAME + "*"); private static final Automaton MANAGE_ILM_AUTOMATON = patterns("indices:admin/ilm/*"); + private static final Automaton MANAGE_DLM_AUTOMATON = patterns("indices:admin/dlm/*"); private static final Automaton MAINTENANCE_AUTOMATON = patterns( "indices:admin/refresh*", "indices:admin/flush*", @@ -173,6 +177,7 @@ public final class IndexPrivilege extends Privilege { public static final IndexPrivilege MANAGE_FOLLOW_INDEX = new IndexPrivilege("manage_follow_index", MANAGE_FOLLOW_INDEX_AUTOMATON); public static final IndexPrivilege MANAGE_LEADER_INDEX = new IndexPrivilege("manage_leader_index", MANAGE_LEADER_INDEX_AUTOMATON); public static final IndexPrivilege MANAGE_ILM = new IndexPrivilege("manage_ilm", MANAGE_ILM_AUTOMATON); + public static final IndexPrivilege MANAGE_DLM = new IndexPrivilege("manage_dlm", MANAGE_DLM_AUTOMATON); public static final IndexPrivilege MAINTENANCE = new IndexPrivilege("maintenance", MAINTENANCE_AUTOMATON); public static final IndexPrivilege AUTO_CONFIGURE = new IndexPrivilege("auto_configure", AUTO_CONFIGURE_AUTOMATON); public static final IndexPrivilege CROSS_CLUSTER_REPLICATION = new IndexPrivilege( @@ -204,6 +209,7 @@ public final class IndexPrivilege extends Privilege { entry("manage_follow_index", MANAGE_FOLLOW_INDEX), entry("manage_leader_index", MANAGE_LEADER_INDEX), entry("manage_ilm", MANAGE_ILM), + DataLifecycle.isEnabled() ? entry("manage_dlm", MANAGE_DLM) : null, entry("maintenance", MAINTENANCE), entry("auto_configure", AUTO_CONFIGURE), TcpTransport.isUntrustedRemoteClusterEnabled() ? 
entry("cross_cluster_replication", CROSS_CLUSTER_REPLICATION) : null, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index d97ce66a292b0..d6a941a8bffd8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -97,7 +97,8 @@ public class ReservedRolesStore implements BiConsumer, ActionListene .build(), "*" ) } - : null + : null, + null ); private static final Map RESERVED_ROLES = initializeReservedRoles(); @@ -163,7 +164,8 @@ private static Map initializeReservedRoles() { getRemoteIndicesReadPrivileges(".monitoring-*"), getRemoteIndicesReadPrivileges("/metrics-(beats|elasticsearch|enterprisesearch|kibana|logstash).*/"), getRemoteIndicesReadPrivileges("metricbeat-*") } - : null + : null, + null ) ) .put( @@ -938,7 +940,8 @@ public static RoleDescriptor kibanaSystemRoleDescriptor(String name) { getRemoteIndicesReadPrivileges("metrics-apm.*"), getRemoteIndicesReadPrivileges("traces-apm.*"), getRemoteIndicesReadPrivileges("traces-apm-*") } - : null + : null, + null ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/CrossClusterAccessUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/CrossClusterAccessUser.java index b238387e44b4c..78973f745b243 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/CrossClusterAccessUser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/CrossClusterAccessUser.java @@ -35,6 +35,7 @@ public class CrossClusterAccessUser extends InternalUser { null, null, null, + null, null ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java index 6f637e0d55be0..00f9cabd4b0df 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java @@ -7,7 +7,10 @@ package org.elasticsearch.xpack.core.security.user; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -122,6 +125,48 @@ public class InternalUsers { ) ); + /** + * Internal user that manages DLM. Has all indices permissions to perform DLM runtime tasks. 
+ */ + public static final InternalUser DLM_USER = new InternalUser( + UsernamesField.DLM_NAME, + new RoleDescriptor( + UsernamesField.DLM_ROLE, + new String[] {}, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("*") + .privileges( + "delete_index", + RolloverAction.NAME, + ForceMergeAction.NAME + "*", + // indices stats is used by rollover, so we need to grant it here + IndicesStatsAction.NAME + "*" + ) + .allowRestrictedIndices(false) + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices( + // System data stream for result history of fleet actions (see Fleet#fleetActionsResultsDescriptor) + ".fleet-actions-results" + ) + .privileges( + "delete_index", + RolloverAction.NAME, + ForceMergeAction.NAME + "*", + // indices stats is used by rollover, so we need to grant it here + IndicesStatsAction.NAME + "*" + ) + .allowRestrictedIndices(true) + .build() }, + null, + null, + new String[] {}, + MetadataUtils.DEFAULT_RESERVED_METADATA, + Map.of() + ) + ); + /** * internal user that manages synonyms via the Synonyms API. Operates on the synonyms system index */ @@ -154,6 +199,7 @@ public class InternalUsers { ASYNC_SEARCH_USER, CROSS_CLUSTER_ACCESS_USER, STORAGE_USER, + DLM_USER, SYNONYMS_USER ).collect(Collectors.toUnmodifiableMap(InternalUser::principal, Function.identity())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java index 5c5ddcf4391d7..4e4d2f91980fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java @@ -16,6 +16,8 @@ public final class UsernamesField { public static final String SYSTEM_ROLE = "_system"; public static final String XPACK_SECURITY_NAME = "_xpack_security"; public static final String XPACK_SECURITY_ROLE = "_xpack_security"; + public static final String DLM_NAME = "_dlm"; + public static final String DLM_ROLE = "_dlm"; public static final String CROSS_CLUSTER_ACCESS_NAME = "_cross_cluster_access"; public static final String CROSS_CLUSTER_ACCESS_ROLE = "_cross_cluster_access"; public static final String SECURITY_PROFILE_NAME = "_security_profile"; diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-executables.json b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-executables.json index 2fc9774200f47..ca41ac433443d 100644 --- a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-executables.json +++ b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-executables.json @@ -28,10 +28,10 @@ "type": "date", "format": "epoch_second" }, - "Symbolization.lastprocessed": { + "Symbolization.next_time": { "type": "date", "format": "epoch_second", - "index": false + "index": true } } } diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-metrics.json b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-metrics.json index 2a4895b2fea0f..c279b0d327b3e 100644 --- a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-metrics.json +++ 
b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-metrics.json @@ -33,9 +33,6 @@ "@timestamp": { "type": "date", "format": "epoch_second" - }, - "Elasticsearch.cluster.id": { - "type": "keyword" } } } diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-stackframes.json b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-stackframes.json index a238c7805b549..5f0d933913ad7 100644 --- a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-stackframes.json +++ b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/component-template/profiling-stackframes.json @@ -9,6 +9,13 @@ } }, "mappings": { + /* + For the inline chain the profiling-stackframes index needs '_source' to be enabled. + Also, doc_values for the fields below are disabled to not store the values twice. + Using synthetic source reduces storage size by ~50% but requires "concatenation" + of arrays and adds latency when _source is reconstructed at query time. + This last point is why we don't want to use synthetic source right now. + */ "_source": { "enabled": true }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java index 48ea20bfbfcb0..94ee54ef5a4b1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java @@ -70,6 +70,7 @@ import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.repositories.SnapshotIndexCommit; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; @@ -130,7 +131,7 @@ public void testSourceIncomplete() throws IOException { shard.mapperService(), snapshotId, indexId, - snapshotRef, + new SnapshotIndexCommit(snapshotRef), null, indexShardSnapshotStatus, Version.CURRENT, @@ -172,7 +173,7 @@ public void testIncrementalSnapshot() throws IOException { shard.mapperService(), snapshotId, indexId, - snapshotRef, + new SnapshotIndexCommit(snapshotRef), null, indexShardSnapshotStatus, Version.CURRENT, @@ -203,7 +204,7 @@ public void testIncrementalSnapshot() throws IOException { shard.mapperService(), snapshotId, indexId, - snapshotRef, + new SnapshotIndexCommit(snapshotRef), null, indexShardSnapshotStatus, Version.CURRENT, @@ -234,7 +235,7 @@ public void testIncrementalSnapshot() throws IOException { shard.mapperService(), snapshotId, indexId, - snapshotRef, + new SnapshotIndexCommit(snapshotRef), null, indexShardSnapshotStatus, Version.CURRENT, @@ -258,7 +259,7 @@ private String randomDoc() { return "{ \"value\" : \"" + randomAlphaOfLength(10) + "\"}"; } - public void testRestoreMinmal() throws IOException { + public void testRestoreMinimal() throws IOException { IndexShard shard = newStartedShard(true); int numInitialDocs = randomIntBetween(10, 100); for (int i = 0; i < numInitialDocs; i++) { @@ -295,7 +296,7 @@ public void testRestoreMinmal() throws IOException { 
                 shard.mapperService(),
                 snapshotId,
                 indexId,
-                snapshotRef,
+                new SnapshotIndexCommit(snapshotRef),
                 null,
                 indexShardSnapshotStatus,
                 Version.CURRENT,
@@ -370,7 +371,7 @@ public void onFailure(Exception e) {
             metadata,
             null,
             SourceOnlySnapshotRepository.getEngineFactory(),
-            () -> {},
+            NOOP_GCP_SYNCER,
             RetentionLeaseSyncer.EMPTY
         );
         DiscoveryNode discoveryNode = TestDiscoveryNode.create("node_g");
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsageTests.java
new file mode 100644
index 0000000000000..19d27fc038fa8
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datastreams/DataLifecycleFeatureSetUsageTests.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.datastreams;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
+
+public class DataLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase<DataLifecycleFeatureSetUsage> {
+
+    @Override
+    protected DataLifecycleFeatureSetUsage createTestInstance() {
+        return new DataLifecycleFeatureSetUsage(
+            new DataLifecycleFeatureSetUsage.LifecycleStats(
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomNonNegativeLong(),
+                randomDouble(),
+                randomBoolean()
+            )
+        );
+    }
+
+    @Override
+    protected DataLifecycleFeatureSetUsage mutateInstance(DataLifecycleFeatureSetUsage instance) {
+        return switch (randomInt(4)) {
+            case 0 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    randomValueOtherThan(instance.lifecycleStats.dataStreamsWithLifecyclesCount, ESTestCase::randomLong),
+                    instance.lifecycleStats.minRetentionMillis,
+                    instance.lifecycleStats.maxRetentionMillis,
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 1 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    randomValueOtherThan(instance.lifecycleStats.minRetentionMillis, ESTestCase::randomLong),
+                    instance.lifecycleStats.maxRetentionMillis,
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 2 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    instance.lifecycleStats.minRetentionMillis,
+                    randomValueOtherThan(instance.lifecycleStats.maxRetentionMillis, ESTestCase::randomLong),
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 3 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    instance.lifecycleStats.minRetentionMillis,
+                    instance.lifecycleStats.maxRetentionMillis,
+                    randomValueOtherThan(instance.lifecycleStats.averageRetentionMillis, ESTestCase::randomDouble),
+                    instance.lifecycleStats.defaultRolloverUsed
+                )
+            );
+            case 4 -> new DataLifecycleFeatureSetUsage(
+                new DataLifecycleFeatureSetUsage.LifecycleStats(
+                    instance.lifecycleStats.dataStreamsWithLifecyclesCount,
+                    instance.lifecycleStats.minRetentionMillis,
+                    instance.lifecycleStats.maxRetentionMillis,
+                    instance.lifecycleStats.averageRetentionMillis,
+                    instance.lifecycleStats.defaultRolloverUsed == false
+                )
+            );
+            default -> throw new RuntimeException("unreachable");
+        };
+    }
+
+    @Override
+    protected Writeable.Reader<DataLifecycleFeatureSetUsage> instanceReader() {
+        return DataLifecycleFeatureSetUsage::new;
+    }
+
+}
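(Reviewer note, not part of the patch: AbstractWireSerializingTestCase drives createTestInstance(), instanceReader() and mutateInstance(). The core round-trip it performs is roughly the following, simplified and with the base class internals elided:

    // Inside a test body; BytesStreamOutput is the ES in-memory stream implementation.
    DataLifecycleFeatureSetUsage original = createTestInstance();
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    DataLifecycleFeatureSetUsage copy = instanceReader().read(out.bytes().streamInput());
    assertEquals(original, copy);                        // equality must survive the wire
    assertNotEquals(original, mutateInstance(original)); // every mutated field must be visible

This is why mutateInstance changes exactly one field per case: it proves each field participates in equals/hashCode and in serialization.)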
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java
index b147f574a14c5..a60b566a1e685 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTestHelper.java
@@ -240,7 +240,8 @@ public static String randomInternalRoleName() {
             UsernamesField.ASYNC_SEARCH_ROLE,
             UsernamesField.XPACK_SECURITY_ROLE,
             UsernamesField.SECURITY_PROFILE_ROLE,
-            UsernamesField.CROSS_CLUSTER_ACCESS_ROLE
+            UsernamesField.CROSS_CLUSTER_ACCESS_ROLE,
+            UsernamesField.DLM_ROLE
         );
     }
 
@@ -312,6 +313,7 @@ public static CrossClusterAccessSubjectInfo randomCrossClusterAccessSubjectInfo(
                 null,
                 null,
                 null,
+                null,
                 null
             )
         )
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java
index a851eb6ee3c87..ac910e93c0519 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java
@@ -48,6 +48,7 @@
 import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY_CCS;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.WORKFLOWS_RESTRICTION_VERSION;
 import static org.hamcrest.Matchers.arrayContaining;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.containsString;
@@ -145,13 +146,13 @@ public void testToString() {
                 + ", indicesPrivileges=[IndicesPrivileges[indices=[i1,i2], allowRestrictedIndices=[false], privileges=[read]"
                 + ", field_security=[grant=[body,title], except=null], query={\"match_all\": {}}],]"
                 + ", applicationPrivileges=[ApplicationResourcePrivileges[application=my_app, privileges=[read,write], resources=[*]],]"
-                + ", runAs=[sudo], metadata=[{}], remoteIndicesPrivileges=[]]"
+                + ", runAs=[sudo], metadata=[{}], remoteIndicesPrivileges=[], restriction=Restriction[workflows=[]]]"
             )
         );
     }
 
     public void testToXContentRoundtrip() throws Exception {
-        final RoleDescriptor descriptor = randomRoleDescriptor(true, true);
+        final RoleDescriptor descriptor = randomRoleDescriptor(true, true, true);
         final XContentType xContentType = randomFrom(XContentType.values());
         final BytesReference xContentValue = toShuffledXContent(descriptor, xContentType, ToXContent.EMPTY_PARAMS, false);
         final RoleDescriptor parsed = RoleDescriptor.parse(descriptor.getName(), xContentValue, false, xContentType);
@@ -233,7 +234,10 @@ public void testParse() throws Exception {
                 },
                 "clusters": ["*"]
               }
-            ]
+            ],
+            "restriction":{
+                "workflows": ["search_application", "search_analytics"]
+            }
           }""";
         rd =
RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); assertEquals("test", rd.getName()); @@ -244,7 +248,9 @@ public void testParse() throws Exception { assertArrayEquals(new String[] { "r1", "*-*" }, rd.getRemoteIndicesPrivileges()[1].remoteClusters()); assertArrayEquals(new String[] { "*" }, rd.getRemoteIndicesPrivileges()[2].remoteClusters()); assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); - + assertThat(rd.hasRestriction(), equalTo(true)); + assertThat(rd.getRestriction().hasWorkflows(), equalTo(true)); + assertArrayEquals(new String[] { "search_application", "search_analytics" }, rd.getRestriction().getWorkflows()); q = """ { "cluster": [ "a", "b" ], @@ -397,6 +403,44 @@ public void testParse() throws Exception { () -> RoleDescriptor.parse("test", new BytesArray(badJson), false, XContentType.JSON) ); assertThat(ex.getMessage(), containsString("not_supported")); + + rd = RoleDescriptor.parse("test_empty_restriction", new BytesArray(""" + { + "index": [{"names": "idx1", "privileges": [ "p1", "p2" ]}], + "restriction":{} + }"""), false, XContentType.JSON); + assertThat(rd.getName(), equalTo("test_empty_restriction")); + assertThat(rd.hasRestriction(), equalTo(false)); + assertThat(rd.hasWorkflowsRestriction(), equalTo(false)); + + final ElasticsearchParseException pex1 = expectThrows( + ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test_null_workflows", new BytesArray(""" + { + "index": [{"names": ["idx1"], "privileges": [ "p1", "p2" ]}], + "restriction":{"workflows":null} + }"""), false, XContentType.JSON) + ); + assertThat( + pex1.getMessage(), + containsString( + "failed to parse restriction for role [test_null_workflows]. could not parse [workflows] field. " + + "expected a string array but found null value instead" + ) + ); + + final ElasticsearchParseException pex2 = expectThrows( + ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test_empty_workflows", new BytesArray(""" + { + "index": [{"names": ["idx1"], "privileges": [ "p1", "p2" ]}], + "restriction":{"workflows":[]} + }"""), false, XContentType.JSON) + ); + assertThat( + pex2.getMessage(), + containsString("failed to parse restriction for role [test_empty_workflows]. 
[workflows] cannot be an empty array") + ); } public void testParsingFieldPermissionsUsesCache() throws IOException { @@ -446,11 +490,12 @@ public void testParsingFieldPermissionsUsesCache() throws IOException { public void testSerializationForCurrentVersion() throws Exception { final TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); final boolean canIncludeRemoteIndices = version.onOrAfter(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY_CCS); + final boolean canIncludeWorkflows = version.onOrAfter(WORKFLOWS_RESTRICTION_VERSION); logger.info("Testing serialization with version {}", version); BytesStreamOutput output = new BytesStreamOutput(); output.setTransportVersion(version); - final RoleDescriptor descriptor = randomRoleDescriptor(true, canIncludeRemoteIndices); + final RoleDescriptor descriptor = randomRoleDescriptor(true, canIncludeRemoteIndices, canIncludeWorkflows); descriptor.writeTo(output); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput( @@ -475,7 +520,7 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro final BytesStreamOutput output = new BytesStreamOutput(); output.setTransportVersion(version); - final RoleDescriptor descriptor = randomRoleDescriptor(true, true); + final RoleDescriptor descriptor = randomRoleDescriptor(true, true, false); descriptor.writeTo(output); final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); StreamInput streamInput = new NamedWriteableAwareStreamInput( @@ -497,6 +542,49 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro descriptor.getRunAs(), descriptor.getMetadata(), descriptor.getTransientMetadata(), + null, + descriptor.getRestriction() + ) + ) + ); + } else { + assertThat(descriptor, equalTo(serialized)); + } + } + + public void testSerializationWithWorkflowsRestrictionAndUnsupportedVersions() throws IOException { + final TransportVersion versionBeforeWorkflowsRestriction = TransportVersionUtils.getPreviousVersion(WORKFLOWS_RESTRICTION_VERSION); + final TransportVersion version = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersion.V_7_17_0, + versionBeforeWorkflowsRestriction + ); + final BytesStreamOutput output = new BytesStreamOutput(); + output.setTransportVersion(version); + + final RoleDescriptor descriptor = randomRoleDescriptor(true, false, true); + descriptor.writeTo(output); + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); + StreamInput streamInput = new NamedWriteableAwareStreamInput( + ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), + registry + ); + streamInput.setTransportVersion(version); + final RoleDescriptor serialized = new RoleDescriptor(streamInput); + if (descriptor.hasWorkflowsRestriction()) { + assertThat( + serialized, + equalTo( + new RoleDescriptor( + descriptor.getName(), + descriptor.getClusterPrivileges(), + descriptor.getIndicesPrivileges(), + descriptor.getApplicationPrivileges(), + descriptor.getConditionalClusterPrivileges(), + descriptor.getRunAs(), + descriptor.getMetadata(), + descriptor.getTransientMetadata(), + descriptor.getRemoteIndicesPrivileges(), null ) ) @@ -506,6 +594,51 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro } } + public void 
testParseRoleWithRestrictionFailsWhenAllowRestrictionIsFalse() { + final String json = """ + { + "restriction": { + "workflows": ["search_application"] + } + }"""; + final ElasticsearchParseException e = expectThrows( + ElasticsearchParseException.class, + () -> RoleDescriptor.parse( + "test_role_with_restriction", + XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(json), XContentType.JSON), + randomBoolean(), + randomBoolean(), + false + ) + ); + assertThat( + e, + TestMatchers.throwableWithMessage( + containsString("failed to parse role [test_role_with_restriction]. unexpected field [restriction]") + ) + ); + } + + public void testParseRoleWithRestrictionWhenAllowRestrictionIsTrue() throws IOException { + final String json = """ + { + "restriction": { + "workflows": ["search_application"] + } + }"""; + RoleDescriptor role = RoleDescriptor.parse( + "test_role_with_restriction", + XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(json), XContentType.JSON), + randomBoolean(), + randomBoolean(), + true + ); + assertThat(role.getName(), equalTo("test_role_with_restriction")); + assertThat(role.hasRestriction(), equalTo(true)); + assertThat(role.hasWorkflowsRestriction(), equalTo(true)); + assertThat(role.getRestriction().getWorkflows(), arrayContaining("search_application")); + } + public void testParseEmptyQuery() throws Exception { String json = """ { @@ -685,6 +818,7 @@ public void testParseRemoteIndicesPrivilegesFailsWhenUntrustedRemoteClusterEnabl "test", XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(json), XContentType.JSON), false, + false, false ) ); @@ -885,7 +1019,8 @@ public void testIsEmpty() { new String[0], new HashMap<>(), new HashMap<>(), - new RoleDescriptor.RemoteIndicesPrivileges[0] + new RoleDescriptor.RemoteIndicesPrivileges[0], + null ).isEmpty() ); @@ -896,6 +1031,7 @@ public void testIsEmpty() { randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean() ); @@ -922,7 +1058,8 @@ public void testIsEmpty() { booleans.get(6) ? new RoleDescriptor.RemoteIndicesPrivileges[0] : new RoleDescriptor.RemoteIndicesPrivileges[] { - RoleDescriptor.RemoteIndicesPrivileges.builder("rmt").indices("idx").privileges("foo").build() } + RoleDescriptor.RemoteIndicesPrivileges.builder("rmt").indices("idx").privileges("foo").build() }, + booleans.get(7) ? null : RoleRestrictionTests.randomWorkflowsRestriction(1, 2) ); if (booleans.stream().anyMatch(e -> e.equals(false))) { @@ -934,8 +1071,18 @@ public void testIsEmpty() { public void testHasPrivilegesOtherThanIndex() { assertThat( - new RoleDescriptor("name", null, randomBoolean() ? null : randomIndicesPrivileges(1, 5), null, null, null, null, null, null) - .hasPrivilegesOtherThanIndex(), + new RoleDescriptor( + "name", + null, + randomBoolean() ? 
null : randomIndicesPrivileges(1, 5), + null, + null, + null, + null, + null, + null, + null + ).hasPrivilegesOtherThanIndex(), is(false) ); final RoleDescriptor roleDescriptor = randomRoleDescriptor(); @@ -959,10 +1106,10 @@ public static RoleDescriptor randomRoleDescriptor() { } public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata) { - return randomRoleDescriptor(allowReservedMetadata, false); + return randomRoleDescriptor(allowReservedMetadata, false, false); } - public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata, boolean allowRemoteIndices) { + public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata, boolean allowRemoteIndices, boolean allowWorkflows) { final RoleDescriptor.RemoteIndicesPrivileges[] remoteIndexPrivileges; if (false == allowRemoteIndices || randomBoolean()) { remoteIndexPrivileges = null; @@ -979,7 +1126,8 @@ public static RoleDescriptor randomRoleDescriptor(boolean allowReservedMetadata, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(allowReservedMetadata), Map.of(), - remoteIndexPrivileges + remoteIndexPrivileges, + allowWorkflows ? RoleRestrictionTests.randomWorkflowsRestriction(1, 3) : null ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java new file mode 100644 index 0000000000000..c2c3049d74c3b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.authz; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.Restriction; + +import java.io.IOException; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class RoleRestrictionTests extends ESTestCase { + + public void testParse() throws Exception { + final String json = """ + { + "workflows": ["search_application", "search_analytics"] + } + """; + Restriction r = Restriction.parse("test_restriction", createJsonParser(json)); + assertThat(r.getWorkflows(), arrayContaining("search_application", "search_analytics")); + assertThat(r.hasWorkflows(), equalTo(true)); + assertThat(r.isEmpty(), equalTo(false)); + + // tests that "restriction": {} is allowed + r = Restriction.parse("test_restriction", createJsonParser("{}")); + assertThat(r.hasWorkflows(), equalTo(false)); + assertThat(r.isEmpty(), equalTo(true)); + + var e = expectThrows(ElasticsearchParseException.class, () -> Restriction.parse("test_restriction", createJsonParser(""" + { + "workflows": [] + } + """))); + assertThat( + e.getMessage(), + containsString("failed to parse restriction for role [test_restriction]. [workflows] cannot be an empty array") + ); + + e = expectThrows(ElasticsearchParseException.class, () -> Restriction.parse("test_restriction", createJsonParser(""" + { + "workflows": null + } + """))); + assertThat( + e.getMessage(), + containsString( + "failed to parse restriction for role [test_restriction]. could not parse [workflows] field. 
" + + "expected a string array but found null value instead" + ) + ); + } + + public void testToXContent() throws Exception { + final Restriction restriction = randomWorkflowsRestriction(1, 5); + final XContentType xContentType = randomFrom(XContentType.values()); + final BytesReference xContentValue = toShuffledXContent(restriction, xContentType, ToXContent.EMPTY_PARAMS, false); + final XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, xContentValue.streamInput()); + final Restriction parsed = Restriction.parse(randomAlphaOfLengthBetween(3, 6), parser); + assertThat(parsed, equalTo(restriction)); + } + + public void testSerialization() throws IOException { + final BytesStreamOutput out = new BytesStreamOutput(); + final Restriction original = randomWorkflowsRestriction(1, 3); + original.writeTo(out); + + StreamInput in = out.bytes().streamInput(); + final Restriction actual = new Restriction(in); + + assertThat(actual, equalTo(original)); + } + + public void testIsEmpty() { + String[] workflows = null; + Restriction r = new Restriction(workflows); + assertThat(r.isEmpty(), equalTo(true)); + assertThat(r.hasWorkflows(), equalTo(false)); + + workflows = randomWorkflowNames(1, 2); + r = new Restriction(workflows); + assertThat(r.isEmpty(), equalTo(false)); + assertThat(r.hasWorkflows(), equalTo(true)); + } + + private static XContentParser createJsonParser(String json) throws IOException { + return XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, new BytesArray(json).streamInput()); + } + + public static Restriction randomWorkflowsRestriction(int min, int max) { + return new Restriction(randomWorkflowNames(min, max)); + } + + public static String[] randomWorkflowNames(int min, int max) { + // TODO: Change this to use actual workflow names instead of random ones. 
+ return randomArray(min, max, String[]::new, () -> randomAlphaOfLengthBetween(3, 6)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 1a060a82b693d..d095045c19238 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.cluster.metadata.DataLifecycle; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TcpTransport; @@ -465,6 +466,33 @@ public void testSlmPrivileges() { } } + public void testDlmPrivileges() { + assumeTrue("feature flag required", DataLifecycle.isEnabled()); + { + Predicate<String> predicate = IndexPrivilege.MANAGE_DLM.predicate(); + // check indices actions + assertThat(predicate.test("indices:admin/dlm/explain"), is(true)); + assertThat(predicate.test("indices:admin/dlm/get"), is(true)); + assertThat(predicate.test("indices:admin/dlm/delete"), is(true)); + assertThat(predicate.test("indices:admin/dlm/put"), is(true)); + assertThat(predicate.test("indices:admin/dlm/brand_new_api"), is(true)); + // check non-dlm action + assertThat(predicate.test("indices:admin/whatever"), is(false)); + } + + { + Predicate<String> predicate = IndexPrivilege.VIEW_METADATA.predicate(); + // check indices actions + assertThat(predicate.test("indices:admin/dlm/explain"), is(true)); + assertThat(predicate.test("indices:admin/dlm/get"), is(true)); + assertThat(predicate.test("indices:admin/dlm/delete"), is(false)); + assertThat(predicate.test("indices:admin/dlm/put"), is(false)); + assertThat(predicate.test("indices:admin/dlm/brand_new_api"), is(false)); + // check non-dlm action + assertThat(predicate.test("indices:admin/whatever"), is(false)); + } + } + public void testIngestPipelinePrivileges() { { verifyClusterActionAllowed( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java index 276b315087996..b20c8e8678141 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/test/TestRestrictedIndices.java @@ -9,11 +9,13 @@ package org.elasticsearch.xpack.core.security.test; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.indices.ExecutorNames; +import org.elasticsearch.indices.SystemDataStreamDescriptor; import 
org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.Feature; @@ -27,6 +29,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; @@ -100,6 +103,25 @@ public class TestRestrictedIndices { new SystemIndexDescriptor(".fleet-policies-leader*", "fleet policies leader"), new SystemIndexDescriptor(".fleet-servers*", "fleet servers"), new SystemIndexDescriptor(".fleet-artifacts*", "fleet artifacts") + ), + List.of( + new SystemDataStreamDescriptor( + ".fleet-actions-results", + "fleet actions results", + SystemDataStreamDescriptor.Type.EXTERNAL, + new ComposableIndexTemplate( + List.of(".fleet-actions-results"), + null, + null, + null, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate() + ), + Map.of(), + List.of("fleet", "kibana"), + null + ) ) ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java index 07bce6c70a488..bb56eab276b2f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java @@ -17,12 +17,16 @@ import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -33,11 +37,13 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authz.permission.ApplicationPermission; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; import org.elasticsearch.xpack.core.security.authz.permission.RemoteIndicesPermission; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.authz.permission.RunAsPermission; import org.elasticsearch.xpack.core.security.authz.permission.SimpleRole; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; import java.util.List; @@ -46,6 +52,8 @@ import static 
org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_TOKENS_INDEX_7; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.SECURITY_MAIN_ALIAS; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.SECURITY_TOKENS_ALIAS; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -210,6 +218,54 @@ public void testStorageUser() { checkIndexAccess(role, randomFrom(sampleDeniedActions), INTERNAL_SECURITY_MAIN_INDEX_7, false); } + public void testDlmUser() { + assertThat(InternalUsers.getUser("_dlm"), is(InternalUsers.DLM_USER)); + assertThat( + InternalUsers.DLM_USER.getLocalClusterRoleDescriptor().get().getMetadata(), + equalTo(MetadataUtils.DEFAULT_RESERVED_METADATA) + ); + + final SimpleRole role = getLocalClusterRole(InternalUsers.DLM_USER); + + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + assertThat(role.application(), is(ApplicationPermission.NONE)); + assertThat(role.remoteIndices(), is(RemoteIndicesPermission.NONE)); + + final String allowedSystemDataStream = ".fleet-actions-results"; + for (var group : role.indices().groups()) { + if (group.allowRestrictedIndices()) { + assertThat(group.indices(), arrayContaining(allowedSystemDataStream)); + } + } + + final List<String> sampleIndexActions = List.of( + RolloverAction.NAME, + DeleteIndexAction.NAME, + ForceMergeAction.NAME, + IndicesStatsAction.NAME + ); + final String dataStream = randomAlphaOfLengthBetween(3, 12); + checkIndexAccess(role, randomFrom(sampleIndexActions), dataStream, true); + // Also check backing index access + checkIndexAccess( + role, + randomFrom(sampleIndexActions), + DataStream.BACKING_INDEX_PREFIX + dataStream + randomAlphaOfLengthBetween(4, 8), + true + ); + + checkIndexAccess(role, randomFrom(sampleIndexActions), allowedSystemDataStream, true); + checkIndexAccess( + role, + randomFrom(sampleIndexActions), + DataStream.BACKING_INDEX_PREFIX + allowedSystemDataStream + randomAlphaOfLengthBetween(4, 8), + true + ); + + checkIndexAccess(role, randomFrom(sampleIndexActions), randomFrom(TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES), false); + } + public void testRegularUser() { var username = randomAlphaOfLengthBetween(4, 12); expectThrows(IllegalStateException.class, () -> InternalUsers.getUser(username)); diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index 7dd280daee35b..be77cd4ec11c5 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -376,3 +376,32 @@ setup: }] - length: { aggregations.test.buckets: 0 } + +--- +"Decay": + - skip: + features: close_to + version: " - 8.8.99" + reason: "decay functions not supported for unsigned_long" + + - do: + search: + index: test1 + body: + size: 10 + query: + function_score: + functions: [{ + "linear": { + "ul": { + "scale": 18000000000000000000.0, + "origin": 12000000000000000000.0 + } + } + }] + + - close_to: { hits.hits.0._score: { value: 0.9228715, error: 0.001 } } + - close_to: { hits.hits.1._score: { value: 0.9228715, error: 0.001 } } + - close_to: { 
hits.hits.2._score: { value: 0.8209238, error: 0.001 } } + - close_to: { hits.hits.3._score: { value: 0.8209238, error: 0.001 } } + - close_to: { hits.hits.4._score: { value: 0.6666667, error: 0.001 } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index d7db947c06573..5128eda6e9b1e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.document.DocumentField; @@ -65,7 +64,6 @@ import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; @@ -587,10 +585,6 @@ public boolean test(ClusterState clusterState) { } static Set<String> nodesShuttingDown(final ClusterState state) { - return NodesShutdownMetadata.getShutdowns(state) - .map(NodesShutdownMetadata::getAllNodeMetadataMap) - .map(Map::keySet) - .orElse(Collections.emptySet()); + return state.metadata().nodeShutdowns().getAllNodeIds(); } - } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index 872ba15c3d6c0..6f28570aa7339 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -939,9 +939,6 @@ static boolean haveMlNodesChanged(ClusterChangedEvent event, TrainedModelAssignm * Returns the set of nodes that are currently shutting down */ static Set<String> nodesShuttingDown(final ClusterState state) { - return NodesShutdownMetadata.getShutdowns(state) - .map(NodesShutdownMetadata::getAllNodeMetadataMap) - .map(Map::keySet) - .orElse(Collections.emptySet()); + return state.metadata().nodeShutdowns().getAllNodeIds(); } } diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index 0b51e72846531..967e1b9cc02c4 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -405,9 +405,9 @@ public void disableMonitoring() throws Exception { boolean foundBulkThreads = false; for (final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { - if 
(WRITE.equals(threadPoolStats.name())) { foundBulkThreads = true; - assertThat("Still some active _bulk threads!", threadPoolStats.getActive(), equalTo(0)); + assertThat("Still some active _bulk threads!", threadPoolStats.active(), equalTo(0)); break; } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java index b61112f05b37b..26eba6ffd2c60 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java @@ -41,6 +41,7 @@ public void cleanup() throws Exception { wipeMonitoringIndices(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96374") public void testMultipleNodes() throws Exception { int nodes = 0; diff --git a/x-pack/plugin/profiler/src/internalClusterTest/java/org/elasticsearch/xpack/profiler/GetProfilingActionIT.java b/x-pack/plugin/profiler/src/internalClusterTest/java/org/elasticsearch/xpack/profiler/GetProfilingActionIT.java index 779c42ea697be..be49453a47c89 100644 --- a/x-pack/plugin/profiler/src/internalClusterTest/java/org/elasticsearch/xpack/profiler/GetProfilingActionIT.java +++ b/x-pack/plugin/profiler/src/internalClusterTest/java/org/elasticsearch/xpack/profiler/GetProfilingActionIT.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.profiler; -import org.elasticsearch.rest.RestStatus; - import java.util.List; public class GetProfilingActionIT extends ProfilingTestCase { @@ -20,7 +18,6 @@ protected boolean useOnlyAllEvents() { public void testGetProfilingDataUnfiltered() throws Exception { GetProfilingRequest request = new GetProfilingRequest(1, null); GetProfilingResponse response = client().execute(GetProfilingAction.INSTANCE, request).get(); - assertEquals(RestStatus.OK, response.status()); assertEquals(1, response.getTotalFrames()); assertNotNull(response.getStackTraces()); StackTrace stackTrace = response.getStackTraces().get("QjoLteG7HX3VUUXr-J4kHQ"); diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetProfilingResponse.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetProfilingResponse.java index 228857ba5b121..d8eb1a86762c2 100644 --- a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetProfilingResponse.java +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetProfilingResponse.java @@ -7,22 +7,23 @@ package org.elasticsearch.xpack.profiler; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; import java.util.Map; import java.util.Objects; +import java.util.function.BiFunction; -import static 
org.elasticsearch.rest.RestStatus.OK; - -public class GetProfilingResponse extends ActionResponse implements StatusToXContentObject { +public class GetProfilingResponse extends ActionResponse implements ChunkedToXContentObject { @Nullable private final Map<String, StackTrace> stackTraces; @Nullable @@ -139,11 +140,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - @Override - public RestStatus status() { - return error != null ? ExceptionsHelper.status(ExceptionsHelper.unwrapCause(error)) : OK; - } - public Map<String, StackTrace> getStackTraces() { return stackTraces; } @@ -169,36 +165,32 @@ public Exception getError() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (stackTraces != null) { - builder.startObject("stack_traces"); - builder.mapContents(stackTraces); - builder.endObject(); - } - if (stackFrames != null) { - builder.startObject("stack_frames"); - builder.mapContents(stackFrames); - builder.endObject(); - } - if (executables != null) { - builder.startObject("executables"); - builder.mapContents(executables); - builder.endObject(); - } - if (stackTraceEvents != null) { - builder.startObject("stack_trace_events"); - builder.mapContents(stackTraceEvents); - builder.endObject(); - } - builder.field("total_frames", totalFrames); + public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { if (error != null) { - builder.startObject("error"); - ElasticsearchException.generateThrowableXContent(builder, params, error); - builder.endObject(); + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + Iterators.single((b, p) -> ElasticsearchException.generateFailureXContent(b, params, error, true)), + ChunkedToXContentHelper.endObject() + ); + } else { + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + optional("stack_traces", stackTraces, ChunkedToXContentHelper::xContentValuesMap), + optional("stack_frames", stackFrames, ChunkedToXContentHelper::xContentValuesMap), + optional("executables", executables, ChunkedToXContentHelper::map), + optional("stack_trace_events", stackTraceEvents, ChunkedToXContentHelper::map), + Iterators.single((b, p) -> b.field("total_frames", totalFrames)), + ChunkedToXContentHelper.endObject() + ); } - builder.endObject(); - return builder; + } + + private <T> Iterator<? extends ToXContent> optional( + String name, + Map<String, T> values, + BiFunction<String, Map<String, T>, Iterator<? extends ToXContent>> supplier + ) { + return (values != null) ? supplier.apply(name, values) : Collections.emptyIterator(); } @Override diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetStatusAction.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetStatusAction.java new file mode 100644 index 0000000000000..c75b675a4d3ec --- /dev/null +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/GetStatusAction.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiler; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class GetStatusAction extends ActionType<GetStatusAction.Response> { + public static final GetStatusAction INSTANCE = new GetStatusAction(); + public static final String NAME = "cluster:monitor/profiling/status/get"; + + protected GetStatusAction() { + super(NAME, GetStatusAction.Response::new); + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private boolean profilingEnabled; + private boolean resourceManagementEnabled; + private boolean resourcesCreated; + + public Response(StreamInput in) throws IOException { + super(in); + profilingEnabled = in.readBoolean(); + resourceManagementEnabled = in.readBoolean(); + resourcesCreated = in.readBoolean(); + } + + public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boolean resourcesCreated) { + this.profilingEnabled = profilingEnabled; + this.resourceManagementEnabled = resourceManagementEnabled; + this.resourcesCreated = resourcesCreated; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("profiling").field("enabled", profilingEnabled).endObject(); + builder.startObject("resource_management").field("enabled", resourceManagementEnabled).endObject(); + builder.startObject("resources").field("created", resourcesCreated).endObject(); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(profilingEnabled); + out.writeBoolean(resourceManagementEnabled); + out.writeBoolean(resourcesCreated); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return profilingEnabled == response.profilingEnabled + && resourceManagementEnabled == response.resourceManagementEnabled + && resourcesCreated == response.resourcesCreated; + } + + @Override + public int hashCode() { + return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + } + + public static class Request extends AcknowledgedRequest<Request> { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() {} + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } +} diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java index 471a843bc2b95..1cec0b98a66b4 100644 --- a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java +++ 
b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java @@ -40,13 +40,12 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackSettings; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.function.Supplier; -import static java.util.Collections.singletonList; - public class ProfilingPlugin extends Plugin implements ActionPlugin { private static final Logger logger = LogManager.getLogger(ProfilingPlugin.class); public static final Setting<Boolean> PROFILING_TEMPLATES_ENABLED = Setting.boolSetting( @@ -117,11 +116,12 @@ public List<RestHandler> getRestHandlers( final IndexNameExpressionResolver indexNameExpressionResolver, final Supplier<DiscoveryNodes> nodesInCluster ) { + List<RestHandler> handlers = new ArrayList<>(); + handlers.add(new RestGetStatusAction()); if (enabled) { - return singletonList(new RestGetProfilingAction()); - } else { - return Collections.emptyList(); + handlers.add(new RestGetProfilingAction()); } + return Collections.unmodifiableList(handlers); } @Override @@ -150,7 +150,10 @@ public static ExecutorBuilder<?> responseExecutorBuilder() { @Override public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { - return List.of(new ActionHandler<>(GetProfilingAction.INSTANCE, TransportGetProfilingAction.class)); + return List.of( + new ActionHandler<>(GetProfilingAction.INSTANCE, TransportGetProfilingAction.class), + new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class) + ); } @Override diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetProfilingAction.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetProfilingAction.java index 3b831f257e710..245292067eb11 100644 --- a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetProfilingAction.java +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetProfilingAction.java @@ -9,8 +9,9 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestStatusToXContentListener; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -30,7 +31,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli request.applyContentParser(getProfilingRequest::parseXContent); return channel -> { - RestStatusToXContentListener<GetProfilingResponse> listener = new RestStatusToXContentListener<>(channel); + RestActionListener<GetProfilingResponse> listener = new RestChunkedToXContentListener<>(channel); RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); cancelClient.execute(GetProfilingAction.INSTANCE, getProfilingRequest, listener); }; diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetStatusAction.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetStatusAction.java new file mode 100644 index 0000000000000..e9f41a0564076 --- /dev/null +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/RestGetStatusAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiler; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestGetStatusAction extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new Route(GET, "/_profiling/status")); + } + + @Override + public String getName() { + return "get_profiling_status_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + GetStatusAction.Request request = new GetStatusAction.Request(); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/TransportGetStatusAction.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/TransportGetStatusAction.java new file mode 100644 index 0000000000000..5fc6b2aa64cc7 --- /dev/null +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/TransportGetStatusAction.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiler; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackSettings; + +public class TransportGetStatusAction extends TransportMasterNodeAction<GetStatusAction.Request, GetStatusAction.Response> { + + @Inject + public TransportGetStatusAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + GetStatusAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + GetStatusAction.Request::new, + indexNameExpressionResolver, + GetStatusAction.Response::new, + ThreadPool.Names.SAME + ); + } + + @Override + protected void masterOperation( + Task task, + GetStatusAction.Request request, + ClusterState state, + ActionListener<GetStatusAction.Response> listener + ) { + boolean pluginEnabled = XPackSettings.PROFILING_ENABLED.get(state.getMetadata().settings()); + boolean resourceManagementEnabled = ProfilingPlugin.PROFILING_TEMPLATES_ENABLED.get(state.getMetadata().settings()); + boolean resourcesCreated = ProfilingIndexTemplateRegistry.areAllTemplatesCreated(state); + listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated)); + } + + @Override + protected ClusterBlockException checkBlock(GetStatusAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/GetProfilingResponseTests.java b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/GetProfilingResponseTests.java index d4654d9b689a0..08d298a664a61 100644 --- a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/GetProfilingResponseTests.java +++ b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/GetProfilingResponseTests.java @@ -8,17 +8,13 @@ package org.elasticsearch.xpack.profiler; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.List; import java.util.Map; -import java.util.function.Supplier; public class GetProfilingResponseTests extends AbstractWireSerializingTestCase<GetProfilingResponse> { - private <T> T randomNullable(Supplier<T> v) { - return randomBoolean() ? v.get() : null; - } - private <T> T randomNullable(T v) { return randomBoolean() ? 
v : null; } @@ -60,4 +56,21 @@ protected GetProfilingResponse mutateInstance(GetProfilingResponse instance) { protected Writeable.Reader<GetProfilingResponse> instanceReader() { return GetProfilingResponse::new; } + + public void testChunking() { + AbstractChunkedSerializingTestCase.assertChunkCount(createTestInstance(), instance -> { + // start, end, total_frames + int chunks = 3; + chunks += size(instance.getExecutables()); + chunks += size(instance.getStackFrames()); + chunks += size(instance.getStackTraces()); + chunks += size(instance.getStackTraceEvents()); + return chunks; + }); + } + + private int size(Map<?, ?> m) { + // if there is a map, we also need to take into account start and end object + return m != null ? 2 + m.size() : 0; + } } diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/CappedScoreQuery.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/CappedScoreQuery.java index 614172a7edd73..d9e65c385c610 100644 --- a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/CappedScoreQuery.java +++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/CappedScoreQuery.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.searchbusinessrules; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; @@ -56,8 +55,8 @@ public void visit(QueryVisitor visitor) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = query.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = query.rewrite(searcher); if (rewritten != query) { return new CappedScoreQuery(rewritten, maxScore); @@ -71,7 +70,7 @@ public Query rewrite(IndexReader reader) throws IOException { return new CappedScoreQuery(((BoostQuery) rewritten).getQuery(), maxScore); } - return super.rewrite(reader); + return super.rewrite(searcher); } /** diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java index 2877e8c9609c2..dcf48861e0212 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java @@ -235,7 +235,7 @@ private static String getRecoverySourceRestoreUuid(ShardRouting shardRouting, Ro // recovery attempt has completed. 
It might succeed, but if it doesn't then we replace it with a dummy restore to bypass // the RestoreInProgressAllocationDecider - final RestoreInProgress restoreInProgress = allocation.custom(RestoreInProgress.TYPE); + final RestoreInProgress restoreInProgress = allocation.getClusterState().custom(RestoreInProgress.TYPE); if (restoreInProgress == null) { // no ongoing restores, so this shard definitely completed return RecoverySource.SnapshotRecoverySource.NO_API_RESTORE_UUID; @@ -342,11 +342,9 @@ private boolean isDelayedDueToNodeRestart(RoutingAllocation allocation, ShardRou if (shardRouting.unassignedInfo().isDelayed()) { String lastAllocatedNodeId = shardRouting.unassignedInfo().getLastAllocatedNodeId(); if (lastAllocatedNodeId != null) { - SingleNodeShutdownMetadata nodeShutdownMetadata = allocation.metadata().nodeShutdowns().get(lastAllocatedNodeId); - return nodeShutdownMetadata != null && nodeShutdownMetadata.getType() == SingleNodeShutdownMetadata.Type.RESTART; + return allocation.metadata().nodeShutdowns().contains(lastAllocatedNodeId, SingleNodeShutdownMetadata.Type.RESTART); } } - return false; } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java index e7d2abf61ef8c..093c80b1a9f9b 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java @@ -254,8 +254,8 @@ protected static SearchableSnapshotRecoveryState createRecoveryState(boolean fin protected static void assertThreadPoolNotBusy(ThreadPool threadPool) throws Exception { assertBusy(() -> { for (ThreadPoolStats.Stats stat : threadPool.stats()) { - assertEquals(stat.getActive(), 0); - assertEquals(stat.getQueue(), 0); + assertEquals(stat.active(), 0); + assertEquals(stat.queue(), 0); } }, 30L, TimeUnit.SECONDS); } } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java index 9b72cd22e7ac6..994de7568d5f1 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java @@ -644,7 +644,7 @@ private void executeTestCase( Version.CURRENT.luceneVersion().toString() ); final List<FileInfo> files = List.of(new FileInfo(blobName, metadata, ByteSizeValue.ofBytes(fileContent.length))); - final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), 0L, files, 0L, 0L, 0, 0L); + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), files, 0L, 0L, 0, 0L); final Path shardDir = randomShardPath(shardId); final ShardPath shardPath = new ShardPath(false, shardDir, shardDir, shardId); final Path cacheDir = Files.createDirectories(resolveSnapshotCache(shardDir).resolve(snapshotId.getUUID())); diff --git 
a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java index c8a8e9c805ebe..72f86496f249f 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java @@ -82,6 +82,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.repositories.SnapshotIndexCommit; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; @@ -627,7 +628,7 @@ protected void assertSnapshotOrGenericThread() { null, snapshotId, indexId, - new Engine.IndexCommitRef(indexCommit, () -> {}), + new SnapshotIndexCommit(new Engine.IndexCommitRef(indexCommit, () -> {})), null, snapshotStatus, Version.CURRENT, @@ -739,7 +740,7 @@ public void testClearCache() throws Exception { ); } - final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot("_snapshot", 0L, randomFiles, 0L, 0L, 0, 0L); + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot("_snapshot", randomFiles, 0L, 0L, 0, 0L); final BlobContainer blobContainer = new FsBlobContainer( new FsBlobStore(randomIntBetween(1, 8) * 1024, shardSnapshotDir, true), BlobPath.EMPTY, diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java index 461cf90d0b37c..e960f1fd1891c 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java @@ -82,7 +82,6 @@ public void testRandomReads() throws Exception { final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot( snapshotId.getName(), - 0L, List.of(new BlobStoreIndexShardSnapshot.FileInfo(blobName, metadata, ByteSizeValue.ofBytes(partSize))), 0L, 0L, @@ -200,7 +199,6 @@ public void testThrowsEOFException() throws Exception { final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot( snapshotId.getName(), - 0L, List.of(new BlobStoreIndexShardSnapshot.FileInfo(blobName, metadata, ByteSizeValue.ofBytes(input.length + 1))), 0L, 0L, diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java index c833bf72f9d7c..05b67627ed7a1 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java @@ -138,7 +138,7 @@ private class TestSearchableSnapshotDirectory extends SearchableSnapshotDirector ) { super( () -> TestUtils.singleBlobContainer(fileInfo.partName(0), fileData), - () -> new BlobStoreIndexShardSnapshot("_snapshot_id", 0L, List.of(fileInfo), 0L, 0L, 0, 0L), + () -> new BlobStoreIndexShardSnapshot("_snapshot_id", List.of(fileInfo), 0L, 0L, 0, 0L), new TestUtils.SimpleBlobStoreCacheService(), "_repository", snapshotId, diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index a170c65550920..f74af8767c0d0 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -26,6 +26,7 @@ dependencies { testImplementation project(path: ':modules:analysis-common') testImplementation project(path: ':modules:reindex') testImplementation project(':modules:data-streams') + testImplementation project(':modules:dlm') testImplementation project(':modules:rest-root') testImplementation project(":client:rest-high-level") diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java index 9883911e5c07c..5b777a59d1069 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java @@ -53,7 +53,7 @@ public class RemoteClusterSecurityLicensingAndFeatureUsageRestIT extends Abstrac .name("fulfilling-cluster") .nodes(1) .apply(commonClusterConfig) - .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.license.self_generated.type", "trial") .setting("remote_cluster_server.enabled", "true") .setting("remote_cluster.port", "0") .setting("xpack.security.remote_cluster_server.ssl.enabled", "true") @@ -113,7 +113,6 @@ protected void configureRemoteCluster(boolean isProxyMode) throws Exception { } public void testCrossClusterAccessFeatureTrackingAndLicensing() throws Exception { - assertBasicLicense(fulfillingClusterClient); assertBasicLicense(client()); final boolean useProxyMode = randomBoolean(); @@ -167,7 +166,6 @@ public void testCrossClusterAccessFeatureTrackingAndLicensing() throws Exception assertRequestFailsDueToUnsupportedLicense(() -> performRequestWithRemoteSearchUser(searchRequest)); // We start the trial license which supports all features. - startTrialLicense(fulfillingClusterClient); startTrialLicense(client()); // Check that feature is not tracked before we send CCS request. 
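A note on the licensing helpers used above: startTrialLicense(...) and assertBasicLicense(...) come from the shared test infrastructure and are presumably thin wrappers over the standard license REST endpoints. A minimal, illustrative sketch of such a helper, assuming the low-level REST client these tests already use (the helper name and assertion are hypothetical, not the actual implementation):

    // Illustrative sketch: activate a trial license (which unlocks all features) on the cluster behind the given client.
    private static void startTrialLicense(RestClient client) throws IOException {
        Request request = new Request("POST", "/_license/start_trial");
        request.addParameter("acknowledge", "true"); // accept the trial terms without a separate confirmation step
        Response response = client.performRequest(request);
        assertEquals(200, response.getStatusLine().getStatusCode());
    }

With the fulfilling cluster now started on a trial license (see the cluster config change above), only the querying cluster needs this upgrade step during the test.
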
diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 40c1e99c0191b..f8eac68b4a687 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.operator; +import org.elasticsearch.cluster.metadata.DataLifecycle; import org.elasticsearch.transport.TcpTransport; import java.util.Objects; @@ -110,7 +111,6 @@ public class Constants { "cluster:admin/xpack/application/search_application/list", "cluster:admin/xpack/application/search_application/put", "cluster:admin/xpack/application/search_application/render_query", - "cluster:admin/xpack/application/search_application/search", "cluster:admin/xpack/ccr/auto_follow_pattern/activate", "cluster:admin/xpack/ccr/auto_follow_pattern/delete", "cluster:admin/xpack/ccr/auto_follow_pattern/get", @@ -291,6 +291,7 @@ public class Constants { "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", "cluster:monitor/nodes/usage", + "cluster:monitor/profiling/status/get", "cluster:monitor/remote/info", "cluster:monitor/settings", "cluster:monitor/state", @@ -368,6 +369,7 @@ public class Constants { "cluster:monitor/xpack/usage/analytics", "cluster:monitor/xpack/usage/archive", "cluster:monitor/xpack/usage/ccr", + DataLifecycle.isEnabled() ? "cluster:monitor/xpack/usage/data_lifecycle" : null, "cluster:monitor/xpack/usage/data_streams", "cluster:monitor/xpack/usage/data_tiers", "cluster:monitor/xpack/usage/enrich", @@ -412,10 +414,10 @@ public class Constants { "indices:admin/data_stream/migrate", "indices:admin/data_stream/modify", "indices:admin/data_stream/promote", - "indices:admin/dlm/delete", - "indices:admin/dlm/get", - "indices:admin/dlm/put", - "indices:admin/dlm/explain", + DataLifecycle.isEnabled() ? "indices:admin/dlm/delete" : null, + DataLifecycle.isEnabled() ? "indices:admin/dlm/get" : null, + DataLifecycle.isEnabled() ? "indices:admin/dlm/put" : null, + DataLifecycle.isEnabled() ? 
"indices:admin/dlm/explain" : null, "indices:admin/delete", "indices:admin/flush", "indices:admin/flush[s]", @@ -487,6 +489,7 @@ public class Constants { "indices:data/read/sql/translate", "indices:data/read/sql/async/get", // org.elasticsearch.xpack.core.sql.SqlAsyncActionNames.SQL_ASYNC_GET_RESULT_ACTION_NAME "indices:data/read/tv", + "indices:data/read/xpack/application/search_application/search", "indices:data/read/xpack/ccr/shard_changes", "indices:data/read/xpack/enrich/coordinate_lookups", "indices:data/read/xpack/graph/explore", diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java index e941083679cfc..3870438c35bac 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesIT.java @@ -94,7 +94,6 @@ public void testOperatorUserCanCallNonOperatorOnlyApi() throws IOException { client().performRequest(mainRequest); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96105") @SuppressWarnings("unchecked") public void testEveryActionIsEitherOperatorOnlyOrNonOperator() throws IOException { final String message = "An action should be declared to be either operator-only in [" diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java index f0ea4b34ba0b2..6c834301f03c8 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java @@ -50,6 +50,7 @@ public void testWithBasicLicense() throws Exception { assertUserProfileFeatures(false); checkRemoteIndicesXPackUsage(); + assertFailToCreateAndUpdateCrossClusterApiKeys(); } public void testWithTrialLicense() throws Exception { @@ -80,6 +81,7 @@ public void testWithTrialLicense() throws Exception { assertReadWithApiKey(apiKeyCredentials2, "/index*/_search", true); assertUserProfileFeatures(true); checkRemoteIndicesXPackUsage(); + assertSuccessToCreateAndUpdateCrossClusterApiKeys(); } finally { revertTrial(); assertAuthenticateWithToken(accessToken, false); @@ -95,6 +97,7 @@ public void testWithTrialLicense() throws Exception { assertReadWithApiKey(apiKeyCredentials2, "/index1/_doc/1", false); assertUserProfileFeatures(false); checkRemoteIndicesXPackUsage(); + assertFailToCreateAndUpdateCrossClusterApiKeys(); } } @@ -567,4 +570,54 @@ private void assertUserProfileFeatures(boolean clusterHasTrialLicense) throws IO assertThat(e.getMessage(), containsString("current license is non-compliant for [user-profile-collaboration]")); } } + + private void assertFailToCreateAndUpdateCrossClusterApiKeys() { + if (false == TcpTransport.isUntrustedRemoteClusterEnabled()) { + return; + } + + final Request createRequest = new Request("POST", "/_security/cross_cluster/api_key"); + createRequest.setJsonEntity(""" + { + "name": "cc-key", + "access": { + 
"search": [ { "names": ["*"] } ] + } + }"""); + final ResponseException e1 = expectThrows(ResponseException.class, () -> adminClient().performRequest(createRequest)); + assertThat(e1.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e1.getMessage(), containsString("current license is non-compliant for [advanced-remote-cluster-security]")); + + final Request updateRequest = new Request("PUT", "/_security/cross_cluster/api_key/" + randomAlphaOfLength(20)); + updateRequest.setJsonEntity(""" + { + "metadata": { } + }"""); + final ResponseException e2 = expectThrows(ResponseException.class, () -> adminClient().performRequest(updateRequest)); + assertThat(e2.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e2.getMessage(), containsString("current license is non-compliant for [advanced-remote-cluster-security]")); + } + + private void assertSuccessToCreateAndUpdateCrossClusterApiKeys() throws IOException { + if (false == TcpTransport.isUntrustedRemoteClusterEnabled()) { + return; + } + + final Request createRequest = new Request("POST", "/_security/cross_cluster/api_key"); + createRequest.setJsonEntity(""" + { + "name": "cc-key", + "access": { + "search": [ { "names": ["*"] } ] + } + }"""); + final ObjectPath createResponse = assertOKAndCreateObjectPath(adminClient().performRequest(createRequest)); + + final Request updateRequest = new Request("PUT", "/_security/cross_cluster/api_key/" + createResponse.evaluate("id")); + updateRequest.setJsonEntity(""" + { + "metadata": { } + }"""); + assertOK(adminClient().performRequest(updateRequest)); + } } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 8fc1f3be562f1..729b2f2652c9e 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.apikey; +import org.apache.http.client.methods.HttpGet; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -43,6 +44,7 @@ import static org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField.RUN_AS_USER_HEADER; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyString; @@ -795,6 +797,37 @@ public void testCreateCrossClusterApiKey() throws IOException { assertThat(e.getMessage(), containsString("action [cluster:admin/xpack/security/cross_cluster/api_key/create] is unauthorized")); } + public void testCannotCreateDerivedCrossClusterApiKey() throws IOException { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); + + final Request createRestApiKeyRequest = new Request("POST", "_security/api_key"); + setUserForRequest(createRestApiKeyRequest, MANAGE_SECURITY_USER, END_USER_PASSWORD); + createRestApiKeyRequest.setJsonEntity("{\"name\":\"rest-key\"}"); + final ObjectPath createRestApiKeyResponse = 
assertOKAndCreateObjectPath(client().performRequest(createRestApiKeyRequest)); + + final Request createDerivedRequest = new Request("POST", "/_security/cross_cluster/api_key"); + createDerivedRequest.setJsonEntity(""" + { + "name": "derived-cross-cluster-key", + "access": { + "replication": [ + { + "names": [ "logs" ] + } + ] + } + }"""); + createDerivedRequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + createRestApiKeyResponse.evaluate("encoded")) + ); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createDerivedRequest)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat( + e.getMessage(), + containsString("authentication via API key not supported: An API key cannot be used to create a cross-cluster API key") + ); + } + public void testCrossClusterApiKeyDoesNotAllowEmptyAccess() throws IOException { assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); @@ -1103,41 +1136,226 @@ public void testUpdateFailureCases() throws IOException { final ResponseException e8 = expectThrows(ResponseException.class, () -> client().performRequest(anotherUpdateRequest)); assertThat(e8.getResponse().getStatusLine().getStatusCode(), equalTo(403)); assertThat(e8.getMessage(), containsString("action [cluster:admin/xpack/security/cross_cluster/api_key/update] is unauthorized")); + } - // Cross-cluster API key created by another API key cannot be updated - // This isn't the desired behaviour and more like a bug because we don't yet have a full story about API key's identity. - // Since we actively block it, we are checking it here. But it should be removed once we solve the issue of API key identity. 
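// Note for readers of this hunk: the removed lines below (creating a cross-cluster API key with a
// plain API key and asserting the update failures) are superseded by the new
// testCannotCreateDerivedCrossClusterApiKey test above, because creating a cross-cluster API key
// with another API key is now rejected outright. A minimal sketch of the guard, as added to
// TransportCreateCrossClusterApiKeyAction further down in this diff:
if (authentication.isApiKey()) {
    listener.onFailure(
        new IllegalArgumentException(
            "authentication via API key not supported: An API key cannot be used to create a cross-cluster API key"
        )
    );
}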
- final Request createDerivedRequest = new Request("POST", "/_security/cross_cluster/api_key"); - createDerivedRequest.setJsonEntity(""" + public void testWorkflowsRestrictionSupportForApiKeys() throws IOException { + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + createApiKeyRequest.setJsonEntity(""" { - "name": "derived-cross-cluster-key", - "access": { - "replication": [ - { - "names": [ "logs" ] + "name": "key1", + "role_descriptors":{ + "r1": { + "restriction": { + "workflows": ["search_application"] + } + } + } + }"""); + Response response = performRequestWithManageOwnApiKeyUser(createApiKeyRequest); + String apiKeyId = assertOKAndCreateObjectPath(response).evaluate("id"); + assertThat(apiKeyId, notNullValue()); + fetchAndAssertApiKeyContainsWorkflows(apiKeyId, "r1", "search_application"); + + final Request grantApiKeyRequest = new Request("POST", "_security/api_key/grant"); + grantApiKeyRequest.setJsonEntity(Strings.format(""" + { + "grant_type":"password", + "username":"%s", + "password":"end-user-password", + "api_key":{ + "name":"key2", + "role_descriptors":{ + "r1":{ + "restriction": { + "workflows": ["search_application"] + } + } } - ] + } + }""", MANAGE_OWN_API_KEY_USER)); + response = adminClient().performRequest(grantApiKeyRequest); + String grantedApiKeyId = assertOKAndCreateObjectPath(response).evaluate("id"); + fetchAndAssertApiKeyContainsWorkflows(grantedApiKeyId, "r1", "search_application"); + + final Request updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId); + updateApiKeyRequest.setJsonEntity(""" + { + "role_descriptors": { + "r1": { + "restriction": { + "workflows": ["search_application", "search_analytics"] + } + } } }"""); - createDerivedRequest.setOptions( - RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + createRestApiKeyResponse.evaluate("encoded")) + response = performRequestWithManageOwnApiKeyUser(updateApiKeyRequest); + assertThat(assertOKAndCreateObjectPath(response).evaluate("updated"), equalTo(true)); + fetchAndAssertApiKeyContainsWorkflows(apiKeyId, "r1", "search_application", "search_analytics"); + + final Request bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); + bulkUpdateApiKeyRequest.setJsonEntity(Strings.format(""" + { + "ids": ["%s"], + "role_descriptors": { + "r1": { + "restriction": { + "workflows": ["search_application"] + } + } + } + }""", apiKeyId)); + response = performRequestWithManageOwnApiKeyUser(bulkUpdateApiKeyRequest); + assertThat(assertOKAndCreateObjectPath(response).evaluate("updated"), contains(apiKeyId)); + fetchAndAssertApiKeyContainsWorkflows(apiKeyId, "r1", "search_application"); + + final Request removeRestrictionRequest = new Request("PUT", "_security/api_key/" + apiKeyId); + removeRestrictionRequest.setJsonEntity(""" + { + "role_descriptors": { + "r1": { + } + } + }"""); + response = performRequestWithManageOwnApiKeyUser(removeRestrictionRequest); + assertThat(assertOKAndCreateObjectPath(response).evaluate("updated"), equalTo(true)); + fetchAndAssertApiKeyDoesNotContainWorkflows(apiKeyId, "r1"); + } + + public void testWorkflowsRestrictionValidation() throws IOException { + final Request createInvalidApiKeyRequest = new Request("POST", "_security/api_key"); + final boolean secondRoleWithWorkflowsRestriction = randomBoolean(); + final String r1 = """ + "r1": { + "restriction": { + "workflows": ["search_application"] + } + } + """; + final String r2 = secondRoleWithWorkflowsRestriction ? 
""" + "r2": { + "restriction": { + "workflows": ["search_analytics"] + } + } + """ : """ + "r2": {} + """; + createInvalidApiKeyRequest.setJsonEntity(Strings.format(""" + { + "name": "key1", + "role_descriptors":{ + %s, + %s + } + }""", r1, r2)); + var e = expectThrows(ResponseException.class, () -> performRequestWithManageOwnApiKeyUser(createInvalidApiKeyRequest)); + if (secondRoleWithWorkflowsRestriction) { + assertThat(e.getMessage(), containsString("more than one role descriptor with restriction is not supported")); + } else { + assertThat(e.getMessage(), containsString("combining role descriptors with and without restriction is not supported")); + } + + final Request grantApiKeyRequest = new Request("POST", "_security/api_key/grant"); + grantApiKeyRequest.setJsonEntity(Strings.format(""" + { + "grant_type":"password", + "username":"%s", + "password":"end-user-password", + "api_key":{ + "name":"key2", + "role_descriptors":{ + %s, + %s + } + } + }""", MANAGE_OWN_API_KEY_USER, r1, r2)); + e = expectThrows(ResponseException.class, () -> adminClient().performRequest(grantApiKeyRequest)); + if (secondRoleWithWorkflowsRestriction) { + assertThat(e.getMessage(), containsString("more than one role descriptor with restriction is not supported")); + } else { + assertThat(e.getMessage(), containsString("combining role descriptors with and without restriction is not supported")); + } + + final Request createApiKeyRequest = new Request("POST", "_security/api_key"); + createApiKeyRequest.setJsonEntity(""" + { + "name": "key1", + "role_descriptors":{ + "r1": { + "restriction": { + "workflows": ["search_application"] + } + } + } + }"""); + Response response = performRequestWithManageOwnApiKeyUser(createApiKeyRequest); + assertOK(response); + String apiKeyId = ObjectPath.createFromResponse(response).evaluate("id"); + assertThat(apiKeyId, notNullValue()); + + final Request updateApiKeyRequest = new Request("PUT", "_security/api_key/" + apiKeyId); + updateApiKeyRequest.setJsonEntity(Strings.format(""" + { + "role_descriptors": { + %s, + %s + } + }""", r1, r2)); + e = expectThrows(ResponseException.class, () -> performRequestWithManageOwnApiKeyUser(updateApiKeyRequest)); + if (secondRoleWithWorkflowsRestriction) { + assertThat(e.getMessage(), containsString("more than one role descriptor with restriction is not supported")); + } else { + assertThat(e.getMessage(), containsString("combining role descriptors with and without restriction is not supported")); + } + + final Request bulkUpdateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); + bulkUpdateApiKeyRequest.setJsonEntity(Strings.format(""" + { + "ids": ["%s"], + "role_descriptors": { + %s, + %s + } + }""", apiKeyId, r1, r2)); + e = expectThrows(ResponseException.class, () -> performRequestWithManageOwnApiKeyUser(bulkUpdateApiKeyRequest)); + if (secondRoleWithWorkflowsRestriction) { + assertThat(e.getMessage(), containsString("more than one role descriptor with restriction is not supported")); + } else { + assertThat(e.getMessage(), containsString("combining role descriptors with and without restriction is not supported")); + } + } + + private Response performRequestWithManageOwnApiKeyUser(Request request) throws IOException { + request.setOptions( + RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", headerFromRandomAuthMethod(MANAGE_OWN_API_KEY_USER, END_USER_PASSWORD)) ); - final ObjectPath createDerivedResponse = assertOKAndCreateObjectPath(client().performRequest(createDerivedRequest)); - final String 
derivedApiKey = createDerivedResponse.evaluate("id"); - // cannot be updated by the original creator user - final Request updateDerivedRequest = new Request("PUT", "/_security/cross_cluster/api_key/" + derivedApiKey); - setUserForRequest(updateDerivedRequest, MANAGE_SECURITY_USER, END_USER_PASSWORD); - updateDerivedRequest.setJsonEntity("{\"metadata\":{}}"); - final ResponseException e9 = expectThrows(ResponseException.class, () -> client().performRequest(updateDerivedRequest)); - assertThat(e9.getResponse().getStatusLine().getStatusCode(), equalTo(404)); - assertThat(e9.getMessage(), containsString("no API key owned by requesting user found")); - // cannot be updated by the original API key either - updateDerivedRequest.setOptions( - RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + createRestApiKeyResponse.evaluate("encoded") + return client().performRequest(request); + } + + @SuppressWarnings("unchecked") + private void fetchAndAssertApiKeyContainsWorkflows(String apiKeyId, String roleName, String... expectedWorkflows) throws IOException { + Response getApiKeyResponse = fetchApiKey(apiKeyId); + List<String> actualWorkflows = assertOKAndCreateObjectPath(getApiKeyResponse).evaluate( + "api_keys.0.role_descriptors." + roleName + ".restriction.workflows" + ); - final ResponseException e10 = expectThrows(ResponseException.class, () -> client().performRequest(updateDerivedRequest)); - assertThat(e10.getResponse().getStatusLine().getStatusCode(), equalTo(400)); - assertThat(e10.getMessage(), containsString("authentication via API key not supported: only the owner user can update an API key")); + assertThat(actualWorkflows, containsInAnyOrder(expectedWorkflows)); + } + + @SuppressWarnings("unchecked") + private void fetchAndAssertApiKeyDoesNotContainWorkflows(String apiKeyId, String roleName) throws IOException { + Response getApiKeyResponse = fetchApiKey(apiKeyId); + Map<String, ?> restriction = assertOKAndCreateObjectPath(getApiKeyResponse).evaluate( + "api_keys.0.role_descriptors."
+ roleName + ".restriction" + ); + assertThat(restriction, nullValue()); + } + + private Response fetchApiKey(String apiKeyId) throws IOException { + Request getApiKeyRequest = new Request(HttpGet.METHOD_NAME, "_security/api_key?id=" + apiKeyId); + Response getApiKeyResponse = adminClient().performRequest(getApiKeyRequest); + assertOK(getApiKeyResponse); + return getApiKeyResponse; } private void assertBadCreateCrossClusterApiKeyRequest(String body, String expectedErrorMessage) throws IOException { @@ -1345,7 +1563,8 @@ private void createRole(String name, Collection<String> clusterPrivileges, Strin null, null, new RoleDescriptor.RemoteIndicesPrivileges[] { - RoleDescriptor.RemoteIndicesPrivileges.builder(remoteIndicesClusterAliases).indices("*").privileges("read").build() } + RoleDescriptor.RemoteIndicesPrivileges.builder(remoteIndicesClusterAliases).indices("*").privileges("read").build() }, + null ); getSecurityClient().putRole(role); } diff --git
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java index d0530929e0953..759114b748018 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithRemoteIndicesPrivilegesRestIT.java @@ -94,7 +94,8 @@ public void testRemoteIndexPrivileges() throws IOException { .query("{\"match\":{\"field\":\"a\"}}") .privileges("read") .grantedFields("field") - .build() } + .build() }, + null ) ); @@ -166,7 +167,8 @@ public void testRemoteIndexPrivileges() throws IOException { .privileges("read") .query("{\"match\":{\"field\":\"a\"}}") .grantedFields("field") - .build() } + .build() }, + null ) ); } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java new file mode 100644 index 0000000000000..d2fc27fb3fcae --- /dev/null +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/role/RoleWithWorkflowsRestrictionRestIT.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.role; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class RoleWithWorkflowsRestrictionRestIT extends SecurityOnTrialLicenseRestTestCase { + + public void testCreateRoleWithWorkflowsRestrictionFail() { + Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/role_with_restriction"); + createRoleRequest.setJsonEntity(""" + { + "cluster": ["all"], + "indices": [ + { + "names": ["index-a"], + "privileges": ["all"] + } + ], + "restriction":{ + "workflows": ["foo", "bar"] + } + }"""); + + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(createRoleRequest)); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("failed to parse role [role_with_restriction]. 
unexpected field [restriction]")); + } + + public void testUpdateRoleWithWorkflowsRestrictionFail() throws IOException { + Request createRoleRequest = new Request(HttpPut.METHOD_NAME, "/_security/role/my_role"); + createRoleRequest.setJsonEntity(""" + { + "cluster": ["all"], + "indices": [ + { + "names": ["index-a"], + "privileges": ["all"] + } + ] + }"""); + Response createRoleResponse = adminClient().performRequest(createRoleRequest); + assertOK(createRoleResponse); + + Request updateRoleRequest = new Request(HttpPost.METHOD_NAME, "/_security/role/my_role"); + updateRoleRequest.setJsonEntity(""" + { + "cluster": ["all"], + "indices": [ + { + "names": ["index-*"], + "privileges": ["all"] + } + ], + "restriction":{ + "workflows": ["foo", "bar"] + } + }"""); + + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(updateRoleRequest)); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("failed to parse role [my_role]. unexpected field [restriction]")); + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataLifecycleServiceRuntimeSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataLifecycleServiceRuntimeSecurityIT.java new file mode 100644 index 0000000000000..37e1122c3f750 --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataLifecycleServiceRuntimeSecurityIT.java @@ -0,0 +1,279 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.integration; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataLifecycle; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.dlm.DataLifecycleErrorStore; +import org.elasticsearch.dlm.DataLifecyclePlugin; +import org.elasticsearch.dlm.DataLifecycleService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.indices.ExecutorNames; +import org.elasticsearch.indices.SystemDataStreamDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.security.LocalStateSecurity; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; + +/** + * This test suite ensures that DLM runtime tasks work correctly with security enabled, i.e., that the internal user for DLM has all + * requisite privileges to orchestrate DLM + */ +public class DataLifecycleServiceRuntimeSecurityIT extends SecurityIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(DataLifecyclePlugin.class, LocalStateSecurity.class, DataStreamsPlugin.class, SystemDataStreamTestPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); + settings.put(DataLifecycleService.DLM_POLL_INTERVAL, "1s"); +
settings.put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), "min_docs=1,max_docs=1"); + return settings.build(); + } + + public void testRolloverLifecycleAndForceMergeAuthorized() throws Exception { + String dataStreamName = randomDataStreamName(); + // empty lifecycle contains the default rollover + prepareDataStreamAndIndex(dataStreamName, new DataLifecycle()); + + assertBusy(() -> { + assertNoAuthzErrors(); + List<Index> backingIndices = getDataStreamBackingIndices(dataStreamName); + assertThat(backingIndices.size(), equalTo(2)); + String backingIndex = backingIndices.get(0).getName(); + assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1)); + String writeIndex = backingIndices.get(1).getName(); + assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); + }); + // Index another doc to force another rollover and trigger an attempted force-merge. The force-merge may be a noop under + // the hood but for authz purposes this doesn't matter, it only matters that the force-merge API was called + indexDoc(dataStreamName); + assertBusy(() -> { + assertNoAuthzErrors(); + List<Index> backingIndices = getDataStreamBackingIndices(dataStreamName); + assertThat(backingIndices.size(), equalTo(3)); + }); + } + + public void testRolloverAndRetentionAuthorized() throws Exception { + String dataStreamName = randomDataStreamName(); + prepareDataStreamAndIndex(dataStreamName, new DataLifecycle(TimeValue.timeValueMillis(0))); + + assertBusy(() -> { + assertNoAuthzErrors(); + List<Index> backingIndices = getDataStreamBackingIndices(dataStreamName); + assertThat(backingIndices.size(), equalTo(1)); + // we expect the data stream to have only one backing index, the write one, with generation 2 + // as generation 1 would've been deleted by DLM given the lifecycle configuration + String writeIndex = backingIndices.get(0).getName(); + assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); + }); + } + + public void testUnauthorized() throws Exception { + // this is an example index pattern for a system index that DLM does not have access to.
DLM will therefore fail at runtime with an + authz exception + prepareDataStreamAndIndex(SECURITY_MAIN_ALIAS, new DataLifecycle()); + + assertBusy(() -> { + Map<String, String> indicesAndErrors = collectErrorsFromStoreAsMap(); + assertThat(indicesAndErrors, is(not(anEmptyMap()))); + assertThat( + indicesAndErrors.values(), + hasItem(allOf(containsString("security_exception"), containsString("unauthorized for user [_dlm]"))) + ); + }); + } + + public void testRolloverAndRetentionWithSystemDataStreamAuthorized() throws Exception { + String dataStreamName = SystemDataStreamTestPlugin.SYSTEM_DATA_STREAM_NAME; + indexDoc(dataStreamName); + + assertBusy(() -> { + assertNoAuthzErrors(); + List<Index> backingIndices = getDataStreamBackingIndices(dataStreamName); + assertThat(backingIndices.size(), equalTo(1)); + // we expect the data stream to have only one backing index, the write one, with generation 2 + // as generation 1 would've been deleted by DLM given the lifecycle configuration + String writeIndex = backingIndices.get(0).getName(); + assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); + }); + } + + private static String randomDataStreamName() { + // lower-case since this is required for a valid data stream name + return randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + } + + private Map<String, String> collectErrorsFromStoreAsMap() { + Iterable<DataLifecycleService> lifecycleServices = internalCluster().getInstances(DataLifecycleService.class); + Map<String, String> indicesAndErrors = new HashMap<>(); + for (DataLifecycleService lifecycleService : lifecycleServices) { + DataLifecycleErrorStore errorStore = lifecycleService.getErrorStore(); + List<String> allIndices = errorStore.getAllIndices(); + for (var index : allIndices) { + indicesAndErrors.put(index, errorStore.getError(index)); + } + } + return indicesAndErrors; + } + + private void prepareDataStreamAndIndex(String dataStreamName, DataLifecycle lifecycle) throws IOException, InterruptedException, + ExecutionException { + putComposableIndexTemplate("id1", null, List.of(dataStreamName + "*"), null, null, lifecycle); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); + indexDoc(dataStreamName); + } + + private List<Index> getDataStreamBackingIndices(String dataStreamName) { + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); + assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); + return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); + } + + private void assertNoAuthzErrors() { + var indicesAndErrors = collectErrorsFromStoreAsMap(); + for (var entry : indicesAndErrors.entrySet()) { + assertThat( + "unexpected authz error for index [" + entry.getKey() + "] with error message [" + entry.getValue() + "]", + entry.getValue(), + not(anyOf(containsString("security_exception"), containsString("unauthorized for user [_dlm]"))) + ); + } + } + + private static void putComposableIndexTemplate( + String id, + @Nullable String mappings, + List<String> patterns, + @Nullable Settings settings, + @Nullable Map<String, Object> metadata, + @Nullable DataLifecycle lifecycle + ) throws IOException { + PutComposableIndexTemplateAction.Request
request = new PutComposableIndexTemplateAction.Request(id); + request.indexTemplate( + new ComposableIndexTemplate( + patterns, + new Template(settings, mappings == null ? null : CompressedXContent.fromJSON(mappings), null, lifecycle), + null, + null, + null, + metadata, + new ComposableIndexTemplate.DataStreamTemplate(), + null + ) + ); + client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + } + + private static void indexDoc(String dataStream) { + BulkRequest bulkRequest = new BulkRequest(); + String value = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(String.format(Locale.ROOT, "{\"%s\":\"%s\"}", DEFAULT_TIMESTAMP_FIELD, value), XContentType.JSON) + ); + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(1)); + String backingIndexPrefix = DataStream.BACKING_INDEX_PREFIX + dataStream; + for (BulkItemResponse itemResponse : bulkResponse) { + assertThat(itemResponse.getFailureMessage(), nullValue()); + assertThat(itemResponse.status(), equalTo(RestStatus.CREATED)); + assertThat(itemResponse.getIndex(), startsWith(backingIndexPrefix)); + } + client().admin().indices().refresh(new RefreshRequest(dataStream)).actionGet(); + } + + public static class SystemDataStreamTestPlugin extends Plugin implements SystemIndexPlugin { + + static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; + + @Override + public Collection<SystemDataStreamDescriptor> getSystemDataStreamDescriptors() { + return List.of( + new SystemDataStreamDescriptor( + SYSTEM_DATA_STREAM_NAME, + "a system data stream for testing", + SystemDataStreamDescriptor.Type.EXTERNAL, + new ComposableIndexTemplate( + List.of(SYSTEM_DATA_STREAM_NAME), + new Template(Settings.EMPTY, null, null, new DataLifecycle(0)), + null, + null, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate() + ), + Map.of(), + Collections.singletonList("test"), + new ExecutorNames(ThreadPool.Names.SYSTEM_CRITICAL_READ, ThreadPool.Names.SYSTEM_READ, ThreadPool.Names.SYSTEM_WRITE) + ) + ); + } + + @Override + public String getFeatureName() { + return SystemDataStreamTestPlugin.class.getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "A plugin for testing DLM runtime actions on system data streams"; + } + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 269e9b5a84946..51ce8c38d71d5 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -2664,7 +2664,7 @@ private List<RoleDescriptor> randomRoleDescriptors() { new RoleDescriptor(randomAlphaOfLength(10), new String[] { "all" }, null, null), randomValueOtherThanMany( rd -> RoleDescriptorRequestValidator.validate(rd) != null, - () -> RoleDescriptorTests.randomRoleDescriptor(false, allowRemoteIndices) + () -> RoleDescriptorTests.randomRoleDescriptor(false, allowRemoteIndices, false) ) ); case 2 -> null; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index 44affb667f61f..22d2686a744f1 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -435,33 +435,7 @@ public void testCreateCrossClusterApiKey() throws IOException { }"""); final PlainActionFuture<CreateApiKeyResponse> future = new PlainActionFuture<>(); - // Cross-cluster API keys can be created by an API key as long as it has manage_security - final boolean createWithUser = randomBoolean(); - if (createWithUser) { - client().execute(CreateCrossClusterApiKeyAction.INSTANCE, request, future); - } else { - final CreateApiKeyResponse createAdminKeyResponse = new CreateApiKeyRequestBuilder(client()).setName("admin-key") - .setRoleDescriptors( - randomFrom( - List.of(new RoleDescriptor(randomAlphaOfLengthBetween(3, 8), new String[] { "manage_security" }, null, null)), - null - ) - ) - .execute() - .actionGet(); - client().filterWithHeader( - Map.of( - "Authorization", - "ApiKey " - + Base64.getEncoder() - .encodeToString( - (createAdminKeyResponse.getId() + ":" + createAdminKeyResponse.getKey().toString()).getBytes( - StandardCharsets.UTF_8 - ) - ) - ) - ).execute(CreateCrossClusterApiKeyAction.INSTANCE, request, future); - } + client().execute(CreateCrossClusterApiKeyAction.INSTANCE, request, future); final CreateApiKeyResponse createApiKeyResponse = future.actionGet(); final String apiKeyId = createApiKeyResponse.getId(); @@ -522,11 +496,7 @@ public void testCreateCrossClusterApiKey() throws IOException { assertThat(getApiKeyInfo.getLimitedBy(), nullValue()); assertThat(getApiKeyInfo.getMetadata(), anEmptyMap()); assertThat(getApiKeyInfo.getUsername(), equalTo("test_user")); - if (createWithUser) { - assertThat(getApiKeyInfo.getRealm(), equalTo("file")); - } else { - assertThat(getApiKeyInfo.getRealm(), equalTo("_es_api_key")); - } + assertThat(getApiKeyInfo.getRealm(), equalTo("file")); // Check the API key attributes with Query API final QueryApiKeyRequest queryApiKeyRequest = new QueryApiKeyRequest( @@ -545,11 +515,7 @@ public void testCreateCrossClusterApiKey() throws IOException { assertThat(queryApiKeyInfo.getLimitedBy(), nullValue()); assertThat(queryApiKeyInfo.getMetadata(), anEmptyMap()); assertThat(queryApiKeyInfo.getUsername(), equalTo("test_user")); - if (createWithUser) { - assertThat(queryApiKeyInfo.getRealm(), equalTo("file")); - } else { - assertThat(queryApiKeyInfo.getRealm(), equalTo("_es_api_key")); - } + assertThat(queryApiKeyInfo.getRealm(), equalTo("file")); } public void testUpdateCrossClusterApiKey() throws IOException { @@ -653,6 +619,41 @@ public void testUpdateCrossClusterApiKey() throws IOException { assertThat(queryApiKeyInfo.getRealm(), equalTo("file")); } + // Cross-cluster API keys cannot be created by an API key even if it has manage_security privilege + // This is intentional until we solve the issue of derived API key ownership + public void testCannotCreateDerivedCrossClusterApiKey() throws IOException { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); + + final CreateApiKeyResponse createAdminKeyResponse = new CreateApiKeyRequestBuilder(client()).setName("admin-key") + .setRoleDescriptors( + randomFrom( + List.of(new RoleDescriptor(randomAlphaOfLengthBetween(3, 8),
new String[] { "manage_security" }, null, null)), + null + ) + ) + .execute() + .actionGet(); + final String encoded = Base64.getEncoder() + .encodeToString( + (createAdminKeyResponse.getId() + ":" + createAdminKeyResponse.getKey().toString()).getBytes(StandardCharsets.UTF_8) + ); + + final var request = CreateCrossClusterApiKeyRequest.withNameAndAccess(randomAlphaOfLengthBetween(3, 8), """ + { + "search": [ {"names": ["logs"]} ] + }"""); + + final PlainActionFuture<CreateApiKeyResponse> future = new PlainActionFuture<>(); + client().filterWithHeader(Map.of("Authorization", "ApiKey " + encoded)) + .execute(CreateCrossClusterApiKeyAction.INSTANCE, request, future); + + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat( + e.getMessage(), + containsString("authentication via API key not supported: An API key cannot be used to create a cross-cluster API key") + ); + } + private GrantApiKeyRequest buildGrantApiKeyRequest(String username, SecureString password, String runAsUsername) throws IOException { final SecureString clonedPassword = password.clone(); final GrantApiKeyRequest grantApiKeyRequest = new GrantApiKeyRequest(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 085865b7abd64..fcb9fd8bb8bb1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -7,11 +7,14 @@ package org.elasticsearch.xpack.security; import io.netty.channel.Channel; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpUtil; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -82,6 +85,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestHeaderDefinition; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.threadpool.ExecutorBuilder; @@ -1656,26 +1660,6 @@ public boolean test(String profile, InetSocketAddress peerAddress) { } final AuthenticationService authenticationService = this.authcService.get(); final ThreadContext threadContext = this.threadContext.get(); - final HttpValidator httpValidator = (httpRequest, channel, listener) -> { - HttpPreRequest httpPreRequest = HttpHeadersAuthenticatorUtils.asHttpPreRequest(httpRequest); - // step 1: Populate the thread context with credentials and any other HTTP request header values (eg run-as) that the - // authentication process looks for while doing its duty. - perRequestThreadContext.accept(httpPreRequest, threadContext); - populateClientCertificate.accept(channel, threadContext); - RemoteHostHeader.process(channel, threadContext); - // step 2: Run authentication on the now properly prepared thread-context. - // This inspects and modifies the thread context.
- if (httpPreRequest.method() != RestRequest.Method.OPTIONS) { - authenticationService.authenticate( - httpPreRequest, - ActionListener.wrap(ignored -> listener.onResponse(null), listener::onFailure) - ); - } else { - // allow for unauthenticated OPTIONS request - // this includes CORS preflight, and regular OPTIONS that return permitted methods for a given path - listener.onResponse(null); - } - }; return getHttpServerTransportWithHeadersValidator( settings, networkService, @@ -1687,12 +1671,81 @@ public boolean test(String profile, InetSocketAddress peerAddress) { tracer, new TLSConfig(sslConfiguration, sslService::createSSLEngine), acceptPredicate, - httpValidator + (httpRequest, channel, listener) -> { + HttpPreRequest httpPreRequest = HttpHeadersAuthenticatorUtils.asHttpPreRequest(httpRequest); + // step 1: Populate the thread context with credentials and any other HTTP request header values (eg run-as) that the + // authentication process looks for while doing its duty. + perRequestThreadContext.accept(httpPreRequest, threadContext); + populateClientCertificate.accept(channel, threadContext); + RemoteHostHeader.process(channel, threadContext); + // step 2: Run authentication on the now properly prepared thread-context. + // This inspects and modifies the thread context. + authenticationService.authenticate( + httpPreRequest, + ActionListener.wrap(ignored -> listener.onResponse(null), listener::onFailure) + ); + }, + (httpRequest, channel, listener) -> { + // allow unauthenticated OPTIONS request through + // this includes CORS preflight, and regular OPTIONS that return permitted methods for a given path + // But still populate the thread context with the usual request headers (as for any other request that is dispatched) + HttpPreRequest httpPreRequest = HttpHeadersAuthenticatorUtils.asHttpPreRequest(httpRequest); + perRequestThreadContext.accept(httpPreRequest, threadContext); + populateClientCertificate.accept(channel, threadContext); + RemoteHostHeader.process(channel, threadContext); + listener.onResponse(null); + } ); }); return httpTransports; } + // "public" so it can be used in tests + public static Netty4HttpServerTransport getHttpServerTransportWithHeadersValidator( + Settings settings, + NetworkService networkService, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer, + TLSConfig tlsConfig, + @Nullable AcceptChannelHandler.AcceptPredicate acceptPredicate, + HttpValidator httpValidator, + HttpValidator httpOptionsValidator + ) { + return getHttpServerTransportWithHeadersValidator( + settings, + networkService, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + sharedGroupFactory, + tracer, + tlsConfig, + acceptPredicate, + (httpRequest, channel, listener) -> { + if (httpRequest.method() == HttpMethod.OPTIONS) { + if (HttpUtil.getContentLength(httpRequest, -1L) > 1 || HttpUtil.isTransferEncodingChunked(httpRequest)) { + // OPTIONS requests with a body are not supported + listener.onFailure( + new ElasticsearchStatusException( + "OPTIONS requests with a payload body are not supported", + RestStatus.BAD_REQUEST + ) + ); + } else { + httpOptionsValidator.validate(httpRequest, channel, listener); + } + } else { + httpValidator.validate(httpRequest, channel, listener); + } + } + ); + } + // "public" so it can be used in tests public static Netty4HttpServerTransport getHttpServerTransportWithHeadersValidator( 
Settings settings, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyAction.java index 2c0df3cd59dfc..09f1d42ddf83e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyAction.java @@ -49,6 +49,12 @@ protected void doExecute(Task task, CreateCrossClusterApiKeyRequest request, Act final Authentication authentication = securityContext.getAuthentication(); if (authentication == null) { listener.onFailure(new IllegalStateException("authentication is required")); + } else if (authentication.isApiKey()) { + listener.onFailure( + new IllegalArgumentException( + "authentication via API key not supported: An API key cannot be used to create a cross-cluster API key" + ) + ); } else { apiKeyService.createApiKey(authentication, request, Set.of(), listener); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 290a4f1fed3bc..def2c74f03b67 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -147,6 +147,7 @@ import static org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder.CCR_CLUSTER_PRIVILEGE_NAMES; import static org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder.CCS_AND_CCR_CLUSTER_PRIVILEGE_NAMES; import static org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder.CCS_CLUSTER_PRIVILEGE_NAMES; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.WORKFLOWS_RESTRICTION_VERSION; import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; @@ -301,6 +302,8 @@ public void createApiKey( Set<RoleDescriptor> userRoleDescriptors, ActionListener<CreateApiKeyResponse> listener ) { + assert request.getType() != ApiKey.Type.CROSS_CLUSTER || false == authentication.isApiKey() + : "cannot create derived cross-cluster API keys"; assert request.getType() != ApiKey.Type.CROSS_CLUSTER || userRoleDescriptors.isEmpty() : "owner user role descriptor must be empty for cross-cluster API keys"; ensureEnabled(); @@ -331,6 +334,15 @@ && hasRemoteIndices(request.getRoleDescriptors())) { ); return; } + final IllegalArgumentException workflowsValidationException = validateWorkflowsRestrictionConstraints( + transportVersion, + request.getRoleDescriptors(), + userRoleDescriptors + ); + if (workflowsValidationException != null) { + listener.onFailure(workflowsValidationException); + return; + } final Set<RoleDescriptor> filteredUserRoleDescriptors = maybeRemoveRemoteIndicesPrivileges( userRoleDescriptors, @@ -350,6 +362,43 @@ private static boolean hasRemoteIndices(Collection<RoleDescriptor> roleDescripto return roleDescriptors != null && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRemoteIndicesPrivileges); } + private static IllegalArgumentException validateWorkflowsRestrictionConstraints( +
TransportVersion transportVersion, + List<RoleDescriptor> requestRoleDescriptors, + Set<RoleDescriptor> userRoleDescriptors + ) { + if (getNumberOfRoleDescriptorsWithRestriction(userRoleDescriptors) > 0L) { + return new IllegalArgumentException("owner user role descriptors must not include restriction"); + } + final long numberOfRoleDescriptorsWithRestriction = getNumberOfRoleDescriptorsWithRestriction(requestRoleDescriptors); + if (numberOfRoleDescriptorsWithRestriction > 0L) { + // creating/updating API keys with restrictions is not allowed in a mixed cluster. + if (transportVersion.before(WORKFLOWS_RESTRICTION_VERSION)) { + return new IllegalArgumentException( + "all nodes must have transport version [" + + WORKFLOWS_RESTRICTION_VERSION + + "] or higher to support restrictions for API keys" + ); + } + // It's only allowed to create/update API keys with a single role descriptor that is restricted. + if (numberOfRoleDescriptorsWithRestriction != 1L) { + return new IllegalArgumentException("more than one role descriptor with restriction is not supported"); + } + // Combining roles with and without restriction is not allowed either. + if (numberOfRoleDescriptorsWithRestriction != requestRoleDescriptors.size()) { + return new IllegalArgumentException("combining role descriptors with and without restriction is not supported"); + } + } + return null; + } + + private static long getNumberOfRoleDescriptorsWithRestriction(Collection<RoleDescriptor> roleDescriptors) { + if (roleDescriptors == null || roleDescriptors.isEmpty()) { + return 0L; + } + return roleDescriptors.stream().filter(RoleDescriptor::hasRestriction).count(); + } + private void createApiKeyAndIndexIt( Authentication authentication, AbstractCreateApiKeyRequest request, @@ -421,7 +470,6 @@ public void updateApiKeys( assert request.getType() != ApiKey.Type.CROSS_CLUSTER || userRoleDescriptors.isEmpty() : "owner user role descriptor must be empty for cross-cluster API keys"; ensureEnabled(); - if (authentication == null) { listener.onFailure(new IllegalArgumentException("authentication must be provided")); return; @@ -445,6 +493,15 @@ && hasRemoteIndices(request.getRoleDescriptors())) { ); return; } + final Exception workflowsValidationException = validateWorkflowsRestrictionConstraints( + transportVersion, + request.getRoleDescriptors(), + userRoleDescriptors + ); + if (workflowsValidationException != null) { + listener.onFailure(workflowsValidationException); + return; + } final String[] apiKeyIds = request.getIds().toArray(String[]::new); final Set<RoleDescriptor> filteredUserRoleDescriptors = maybeRemoveRemoteIndicesPrivileges( @@ -577,7 +634,8 @@ static Set<RoleDescriptor> maybeRemoveRemoteIndicesPrivileges( roleDescriptor.getRunAs(), roleDescriptor.getMetadata(), roleDescriptor.getTransientMetadata(), - null + null, + roleDescriptor.getRestriction() ); } return roleDescriptor; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 5e0583934105c..dad1e0d5d7758 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -128,6 +128,9 @@ public static void switchUserBasedOnActionOriginAndExecute( case POST_WRITE_REFRESH_ORIGIN: securityContext.executeAsInternalUser(InternalUsers.STORAGE_USER, version, consumer); break; + case DLM_ORIGIN: +
securityContext.executeAsInternalUser(InternalUsers.DLM_USER, version, consumer); + break; case WATCHER_ORIGIN: case ML_ORIGIN: case MONITORING_ORIGIN: @@ -136,7 +139,6 @@ public static void switchUserBasedOnActionOriginAndExecute( case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: case INDEX_LIFECYCLE_ORIGIN: - case DLM_ORIGIN: case ENRICH_ORIGIN: case IDP_ORIGIN: case INGEST_ORIGIN: diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java index 71ad53d20335c..5824ffa4d7559 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -309,7 +309,7 @@ static RoleDescriptor parseRoleDescriptor( if (token == XContentParser.Token.START_OBJECT) { // we pass true as last parameter because we do not want to reject files if field permissions // are given in 2.x syntax - RoleDescriptor descriptor = RoleDescriptor.parse(roleName, parser, true); + RoleDescriptor descriptor = RoleDescriptor.parse(roleName, parser, true, false); return checkDescriptor(descriptor, path, logger, settings, xContentRegistry); } else { logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index cdd9bea1330d1..439b83dc65562 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -242,6 +242,7 @@ public void putRole(final PutRoleRequest request, final RoleDescriptor role, fin void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener<Boolean> listener) { final String roleName = role.getName(); assert NativeRealmValidationUtil.validateRoleName(roleName, false) == null : "Role name was invalid or reserved: " + roleName; + assert false == role.hasRestriction() : "restriction is not supported for native roles"; securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { final XContentBuilder xContentBuilder; @@ -454,9 +455,9 @@ static RoleDescriptor transformRole(String id, BytesReference sourceBytes, Logge assert id.startsWith(ROLE_TYPE) : "[" + id + "] does not have role prefix"; final String name = id.substring(ROLE_TYPE.length() + 1); try { - // we pass true as last parameter because we do not want to reject permissions if the field permissions + // we pass true as allow2xFormat parameter because we do not want to reject permissions if the field permissions // are given in 2.x syntax - RoleDescriptor roleDescriptor = RoleDescriptor.parse(name, sourceBytes, true, XContentType.JSON); + RoleDescriptor roleDescriptor = RoleDescriptor.parse(name, sourceBytes, true, XContentType.JSON, false); final boolean dlsEnabled = Arrays.stream(roleDescriptor.getIndicesPrivileges()) .anyMatch(IndicesPrivileges::isUsingDocumentLevelSecurity); final boolean flsEnabled = Arrays.stream(roleDescriptor.getIndicesPrivileges()) @@ -481,13 +482,14 @@ static RoleDescriptor transformRole(String id, BytesReference sourceBytes, Logge
roleDescriptor.getRunAs(), roleDescriptor.getMetadata(), transientMap, - roleDescriptor.getRemoteIndicesPrivileges() + roleDescriptor.getRemoteIndicesPrivileges(), + roleDescriptor.getRestriction() ); } else { return roleDescriptor; } } catch (Exception e) { - logger.error(() -> "error in the format of data for role [" + name + "]", e); + logger.error("error in the format of data for role [" + name + "]", e); return null; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyAction.java index 9f003314c7898..469571798680b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; @@ -26,6 +27,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.security.Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE; /** * Rest action to create an API key specific to cross cluster access via the dedicate remote cluster server port @@ -79,4 +81,16 @@ protected RestChannelConsumer innerPrepareRequest(final RestRequest request, fin new RestToXContentListener<>(channel) ); } + + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + final Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.checkWithoutTracking(licenseState)) { + return null; + } else { + return LicenseUtils.newComplianceException(ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName()); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java index 7453609f6bbe0..0623009529984 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; @@ -24,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static 
org.elasticsearch.xpack.security.Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE; public final class RestUpdateCrossClusterApiKeyAction extends ApiKeyBaseRestHandler { @@ -64,5 +66,17 @@ protected RestChannelConsumer innerPrepareRequest(final RestRequest request, fin ); } + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + final Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.checkWithoutTracking(licenseState)) { + return null; + } else { + return LicenseUtils.newComplianceException(ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName()); + } + } + record Payload(CrossClusterApiKeyRoleDescriptorBuilder builder, Map metadata) {} } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyActionTests.java index 26b594bca9a01..e2d1fc3a24568 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/apikey/TransportCreateCrossClusterApiKeyActionTests.java @@ -12,12 +12,9 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyRequest; -import org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.security.authc.ApiKeyService; @@ -25,41 +22,79 @@ import java.io.IOException; import java.util.Set; -import static org.elasticsearch.xcontent.json.JsonXContent.jsonXContent; +import static org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyRequestTests.randomCrossClusterApiKeyAccessField; +import static org.hamcrest.Matchers.containsString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; public class TransportCreateCrossClusterApiKeyActionTests extends ESTestCase { - public void testApiKeyWillBeCreatedWithEmptyUserRoleDescriptors() throws IOException { - final ApiKeyService apiKeyService = mock(ApiKeyService.class); - final SecurityContext securityContext = mock(SecurityContext.class); - final Authentication authentication = AuthenticationTestHelper.builder().build(); - when(securityContext.getAuthentication()).thenReturn(authentication); - final var action = new TransportCreateCrossClusterApiKeyAction( + private ApiKeyService apiKeyService; + private SecurityContext securityContext; + private TransportCreateCrossClusterApiKeyAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + 
apiKeyService = mock(ApiKeyService.class); + securityContext = mock(SecurityContext.class); + action = new TransportCreateCrossClusterApiKeyAction( mock(TransportService.class), mock(ActionFilters.class), apiKeyService, securityContext ); + } - final XContentParser parser = jsonXContent.createParser(XContentParserConfiguration.EMPTY, """ - { - "search": [ {"names": ["idx"]} ] - }"""); + public void testApiKeyWillBeCreatedWithEmptyUserRoleDescriptors() throws IOException { + final Authentication authentication = randomValueOtherThanMany( + Authentication::isApiKey, + () -> AuthenticationTestHelper.builder().build() + ); + when(securityContext.getAuthentication()).thenReturn(authentication); - final CreateCrossClusterApiKeyRequest request = new CreateCrossClusterApiKeyRequest( + final var request = CreateCrossClusterApiKeyRequest.withNameAndAccess( randomAlphaOfLengthBetween(3, 8), - CrossClusterApiKeyRoleDescriptorBuilder.PARSER.parse(parser, null), - null, - null + randomCrossClusterApiKeyAccessField() ); - final PlainActionFuture future = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, future); verify(apiKeyService).createApiKey(same(authentication), same(request), eq(Set.of()), same(future)); } + + public void testAuthenticationIsRequired() throws IOException { + final var request = CreateCrossClusterApiKeyRequest.withNameAndAccess( + randomAlphaOfLengthBetween(3, 8), + randomCrossClusterApiKeyAccessField() + ); + final PlainActionFuture future = new PlainActionFuture<>(); + action.doExecute(mock(Task.class), request, future); + + final IllegalStateException e = expectThrows(IllegalStateException.class, future::actionGet); + assertThat(e.getMessage(), containsString("authentication is required")); + verifyNoInteractions(apiKeyService); + } + + public void testCannotCreateDerivedCrossClusterApiKey() throws IOException { + final Authentication authentication = AuthenticationTestHelper.builder().apiKey().build(); + when(securityContext.getAuthentication()).thenReturn(authentication); + + final var request = CreateCrossClusterApiKeyRequest.withNameAndAccess( + randomAlphaOfLengthBetween(3, 8), + randomCrossClusterApiKeyAccessField() + ); + final PlainActionFuture future = new PlainActionFuture<>(); + action.doExecute(mock(Task.class), request, future); + + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat( + e.getMessage(), + containsString("authentication via API key not supported: An API key cannot be used to create a cross-cluster API key") + ); + verifyNoInteractions(apiKeyService); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index dc4e5bc1a1a41..12e3a7e1b5bd2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -104,6 +104,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; +import org.elasticsearch.xpack.core.security.authz.RoleRestrictionTests; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import 
org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; @@ -145,6 +146,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -158,12 +160,14 @@ import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_ID_KEY; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_METADATA_KEY; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.API_KEY_TYPE_KEY; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.WORKFLOWS_RESTRICTION_VERSION; import static org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME; import static org.elasticsearch.xpack.security.authc.ApiKeyService.LEGACY_SUPERUSER_ROLE_DESCRIPTOR; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -1120,6 +1124,36 @@ public void testParseRoleDescriptors() { apiKeyRoleType == RoleReference.ApiKeyRoleType.LIMITED_BY ? SUPERUSER_ROLE_DESCRIPTOR : LEGACY_SUPERUSER_ROLE_DESCRIPTOR ) ); + + // Tests parsing of role descriptor with and without workflows restriction. 
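The bytes added just below exercise both branches of the new parse-time switch. As a companion, a minimal standalone sketch of that switch, assuming the five-argument RoleDescriptor.parse overload that the file and native roles stores call above (allow2xFormat, then the XContentType, then allowRestriction); the role JSON mirrors the test payload that follows, and the class and variable names here are illustrative only:

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.xcontent.XContentType;
    import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;

    public class ParseRestrictionSketch {
        public static void main(String[] args) throws Exception {
            BytesArray source = new BytesArray("""
                {"indices":[{"names":["books"],"privileges":["read"]}],"restriction":{"workflows":["search_application"]}}""");
            // allowRestriction = true: the descriptor parses and carries the
            // restriction, the path taken for API key role descriptors.
            RoleDescriptor accepted = RoleDescriptor.parse("demo", source, true, XContentType.JSON, true);
            System.out.println(accepted.hasRestriction()); // true
            // allowRestriction = false: the field is rejected, as asserted later in
            // FileRolesStoreTests and NativeRolesStoreTests ("unexpected field [restriction]").
            RoleDescriptor rejected = RoleDescriptor.parse("demo", source, true, XContentType.JSON, false); // throws
        }
    }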
+ roleBytes = new BytesArray(""" + { + "role_with_restriction":{ + "indices":[{"names":["books"],"privileges":["read"]}], + "restriction":{"workflows":["search_application"]} + }, + "role_without_restriction":{ + "indices":[{"names":["movies"],"privileges":["read"]}] + } + } + """); + roleDescriptors = service.parseRoleDescriptorsBytes(apiKeyId, roleBytes, apiKeyRoleType); + assertEquals(2, roleDescriptors.size()); + Map roleDescriptorsByName = roleDescriptors.stream() + .collect(Collectors.toMap(RoleDescriptor::getName, Function.identity())); + assertEquals(Set.of("role_with_restriction", "role_without_restriction"), roleDescriptorsByName.keySet()); + + RoleDescriptor roleWithRestriction = roleDescriptorsByName.get("role_with_restriction"); + assertThat(roleWithRestriction.hasRestriction(), equalTo(true)); + assertThat(roleWithRestriction.getRestriction().isEmpty(), equalTo(false)); + assertThat(roleWithRestriction.getRestriction().hasWorkflows(), equalTo(true)); + assertThat(roleWithRestriction.getRestriction().getWorkflows(), arrayContaining("search_application")); + + RoleDescriptor roleWithoutRestriction = roleDescriptorsByName.get("role_without_restriction"); + assertThat(roleWithoutRestriction.hasRestriction(), equalTo(false)); + assertThat(roleWithoutRestriction.getRestriction().isEmpty(), equalTo(true)); + assertThat(roleWithoutRestriction.getRestriction().hasWorkflows(), equalTo(false)); + assertThat(roleWithoutRestriction.getRestriction().getWorkflows(), nullValue()); } public void testApiKeyServiceDisabled() throws Exception { @@ -2276,7 +2310,7 @@ public void testGetApiKeyMetadata() throws IOException { public void testMaybeRemoveRemoteIndicesPrivilegesWithUnsupportedVersion() { final String apiKeyId = randomAlphaOfLengthBetween(5, 8); final Set userRoleDescriptors = Set.copyOf( - randomList(2, 5, () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean())) + randomList(2, 5, () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), randomBoolean())) ); // Selecting random unsupported version. 
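The two tests added after this point pin down the cluster-version gate for the new field: restriction-bearing role descriptors must be refused until every node understands them. A condensed sketch of that gate as a hypothetical standalone helper (the real check lives inside ApiKeyService; TransportVersion, hasRestriction and WORKFLOWS_RESTRICTION_VERSION are the names this patch uses):

    import java.util.List;

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;

    import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.WORKFLOWS_RESTRICTION_VERSION;

    final class WorkflowsRestrictionGate {
        // Reject restriction-bearing role descriptors while any node in the
        // cluster is still on a transport version older than the one that
        // introduced the "restriction" field.
        static void checkRestrictionsSupported(TransportVersion minClusterTransportVersion, List<RoleDescriptor> roleDescriptors) {
            if (minClusterTransportVersion.before(WORKFLOWS_RESTRICTION_VERSION)
                && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRestriction)) {
                throw new IllegalArgumentException(
                    "all nodes must have transport version ["
                        + WORKFLOWS_RESTRICTION_VERSION
                        + "] or higher to support restrictions for API keys"
                );
            }
        }
    }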
@@ -2357,7 +2391,10 @@ public void testBuildDelimitedStringWithLimit() { } public void testCreateCrossClusterApiKeyMinVersionConstraint() { - final Authentication authentication = AuthenticationTestHelper.builder().build(); + final Authentication authentication = randomValueOtherThanMany( + Authentication::isApiKey, + () -> AuthenticationTestHelper.builder().build() + ); final AbstractCreateApiKeyRequest request = mock(AbstractCreateApiKeyRequest.class); when(request.getType()).thenReturn(ApiKey.Type.CROSS_CLUSTER); @@ -2490,6 +2527,118 @@ public void testValidateApiKeyTypeAndExpiration() throws IOException { assertThat(auth3.getMetadata(), hasEntry(API_KEY_TYPE_KEY, apiKeyDoc3.type.value())); } + public void testCreateOrUpdateApiKeyWithWorkflowsRestrictionForUnsupportedVersion() { + final Authentication authentication = AuthenticationTestHelper.builder().build(); + final ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + ); + final ClusterState clusterState = mock(ClusterState.class); + when(clusterService.state()).thenReturn(clusterState); + final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersion.MINIMUM_COMPATIBLE, + TransportVersionUtils.getPreviousVersion(WORKFLOWS_RESTRICTION_VERSION) + ); + when(clusterState.getMinTransportVersion()).thenReturn(minTransportVersion); + + final ApiKeyService service = new ApiKeyService( + Settings.EMPTY, + clock, + client, + securityIndex, + clusterService, + cacheInvalidatorRegistry, + threadPool + ); + + final List<RoleDescriptor> roleDescriptorsWithWorkflowsRestriction = randomList( + 1, + 3, + () -> randomRoleDescriptorWithWorkflowsRestriction() + ); + + final AbstractCreateApiKeyRequest createRequest = mock(AbstractCreateApiKeyRequest.class); + when(createRequest.getType()).thenReturn(ApiKey.Type.REST); + when(createRequest.getRoleDescriptors()).thenReturn(roleDescriptorsWithWorkflowsRestriction); + + final PlainActionFuture<CreateApiKeyResponse> createFuture = new PlainActionFuture<>(); + service.createApiKey(authentication, createRequest, Set.of(), createFuture); + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, createFuture::actionGet); + assertThat( + e1.getMessage(), + containsString("all nodes must have transport version [8500005] or higher to support restrictions for API keys") + ); + + final BulkUpdateApiKeyRequest updateRequest = new BulkUpdateApiKeyRequest( + randomList(1, 3, () -> randomAlphaOfLengthBetween(3, 5)), + roleDescriptorsWithWorkflowsRestriction, + Map.of() + ); + final PlainActionFuture<BulkUpdateApiKeyResponse> updateFuture = new PlainActionFuture<>(); + service.updateApiKeys(authentication, updateRequest, Set.of(), updateFuture); + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, updateFuture::actionGet); + assertThat( + e2.getMessage(), + containsString("all nodes must have transport version [8500005] or higher to support restrictions for API keys") + ); + } + + public void testValidateOwnerUserRoleDescriptorsWithWorkflowsRestriction() { + final Authentication authentication = AuthenticationTestHelper.builder().build(); + final ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, Set.of(ApiKeyService.DELETE_RETENTION_PERIOD)) + ); + final ClusterState clusterState = mock(ClusterState.class); + 
when(clusterService.state()).thenReturn(clusterState); + final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( + random(), + WORKFLOWS_RESTRICTION_VERSION, + TransportVersion.CURRENT + ); + when(clusterState.getMinTransportVersion()).thenReturn(minTransportVersion); + final ApiKeyService service = new ApiKeyService( + Settings.EMPTY, + clock, + client, + securityIndex, + clusterService, + cacheInvalidatorRegistry, + threadPool + ); + + final Set<RoleDescriptor> userRoleDescriptorsWithWorkflowsRestriction = randomSet( + 1, + 2, + () -> randomRoleDescriptorWithWorkflowsRestriction() + ); + final List<RoleDescriptor> requestRoleDescriptors = randomList( + 0, + 1, + () -> RoleDescriptorTests.randomRoleDescriptor(randomBoolean(), randomBoolean(), randomBoolean()) + ); + + final AbstractCreateApiKeyRequest createRequest = mock(AbstractCreateApiKeyRequest.class); + when(createRequest.getType()).thenReturn(ApiKey.Type.REST); + when(createRequest.getRoleDescriptors()).thenReturn(requestRoleDescriptors); + + final PlainActionFuture<CreateApiKeyResponse> createFuture = new PlainActionFuture<>(); + service.createApiKey(authentication, createRequest, userRoleDescriptorsWithWorkflowsRestriction, createFuture); + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, createFuture::actionGet); + assertThat(e1.getMessage(), containsString("owner user role descriptors must not include restriction")); + + final BulkUpdateApiKeyRequest updateRequest = new BulkUpdateApiKeyRequest( + randomList(1, 3, () -> randomAlphaOfLengthBetween(3, 5)), + requestRoleDescriptors, + Map.of() + ); + final PlainActionFuture<BulkUpdateApiKeyResponse> updateFuture = new PlainActionFuture<>(); + service.updateApiKeys(authentication, updateRequest, userRoleDescriptorsWithWorkflowsRestriction, updateFuture); + final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, updateFuture::actionGet); + assertThat(e2.getMessage(), containsString("owner user role descriptors must not include restriction")); + } + private static RoleDescriptor randomRoleDescriptorWithRemoteIndexPrivileges() { return new RoleDescriptor( randomAlphaOfLengthBetween(3, 90), @@ -2500,7 +2649,23 @@ private static RoleDescriptor randomRoleDescriptorWithRemoteIndexPrivileges() { generateRandomStringArray(5, randomIntBetween(2, 8), false, true), RoleDescriptorTests.randomRoleDescriptorMetadata(randomBoolean()), Map.of(), - RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3) + RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3), + RoleRestrictionTests.randomWorkflowsRestriction(1, 3) + ); + } + + private static RoleDescriptor randomRoleDescriptorWithWorkflowsRestriction() { + return new RoleDescriptor( + randomAlphaOfLengthBetween(3, 90), + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + RoleDescriptorTests.randomIndicesPrivileges(0, 3), + RoleDescriptorTests.randomApplicationPrivileges(), + RoleDescriptorTests.randomClusterPrivileges(), + generateRandomStringArray(5, randomIntBetween(2, 8), false, true), + RoleDescriptorTests.randomRoleDescriptorMetadata(randomBoolean()), + Map.of(), + null, + RoleRestrictionTests.randomWorkflowsRestriction(1, 3) ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java index 791697e11e953..81b3b38a88dd3 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java @@ -157,18 +157,7 @@ public void testExceptionProcessingRequestOnInvalidCrossClusterAccessSubjectInfo // Invalid internal user AuthenticationTestHelper.builder().internal(InternalUsers.XPACK_USER).build(), new RoleDescriptorsIntersection( - new RoleDescriptor( - "invalid_role", - new String[] { "all" }, - null, - null, - null, - null, - null, - null, - null - - ) + new RoleDescriptor("invalid_role", new String[] { "all" }, null, null, null, null, null, null, null, null) ) ) ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java index 41463f680b015..cd2442a182559 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceIntegTests.java @@ -75,7 +75,8 @@ public void testGetRoleDescriptorsIntersectionForRemoteCluster() throws IOExcept .privileges(shuffledList(List.of("read", "write"))) .build(), randomNonEmptySubsetOf(List.of(concreteClusterAlias, "*")).toArray(new String[0]) - ) } + ) }, + null ) ); final String nodeName = internalCluster().getRandomNodeName(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java index 1056746da9416..d8d96b5db136f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.DataLifecycle; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.persistent.PersistentTasksService; @@ -118,6 +119,10 @@ public void testSwitchAndExecuteSecurityProfileUser() throws Exception { ); } + public void testSwitchWithDlmOrigin() throws Exception { + assertSwitchBasedOnOriginAndExecute(DataLifecycle.DLM_ORIGIN, InternalUsers.DLM_USER, randomTransportVersion()); + } + public void testSwitchAndExecuteXpackUser() throws Exception { for (String origin : Arrays.asList( ClientHelper.ML_ORIGIN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 4ce8ed3ad1a4a..be176ec6e7575 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -50,6 +50,7 @@ import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; 
import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequest.Empty; import org.elasticsearch.xcontent.XContentBuilder; @@ -956,7 +957,8 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build null, new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*", "remote").indices("abc-*", "xyz-*").privileges("read").build(), - RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*").indices("remote-idx-1-*").privileges("read").build(), } + RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*").indices("remote-idx-1-*").privileges("read").build(), }, + null ); ConfigurableClusterPrivilege ccp2 = new MockConfigurableClusterPrivilege() { @@ -983,7 +985,8 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build null, new RoleDescriptor.RemoteIndicesPrivileges[] { RoleDescriptor.RemoteIndicesPrivileges.builder("*").indices("remote-idx-2-*").privileges("read").build(), - RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*").indices("remote-idx-3-*").privileges("read").build() } + RoleDescriptor.RemoteIndicesPrivileges.builder("remote-*").indices("remote-idx-3-*").privileges("read").build() }, + null ); FieldPermissionsCache cache = new FieldPermissionsCache(Settings.EMPTY); @@ -2017,6 +2020,7 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws Exception { } public void testGetRoleForCrossClusterAccessAuthentication() throws Exception { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); final FileRolesStore fileRolesStore = mock(FileRolesStore.class); doCallRealMethod().when(fileRolesStore).accept(anySet(), anyActionListener()); final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); @@ -2096,6 +2100,7 @@ public void testGetRoleForCrossClusterAccessAuthentication() throws Exception { null, null, null, + null, null ) ) @@ -2788,7 +2793,7 @@ private RoleDescriptor roleDescriptorWithIndicesPrivileges( final RoleDescriptor.RemoteIndicesPrivileges[] rips, final IndicesPrivileges[] ips ) { - return new RoleDescriptor(name, null, ips, null, null, null, null, null, rips); + return new RoleDescriptor(name, null, ips, null, null, null, null, null, rips, null); } private Role buildRole(final RoleDescriptor... roleDescriptors) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java index 73c96c9e85b38..f8de5c5845c74 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java @@ -617,7 +617,7 @@ public void testThatInvalidRoleDefinitions() throws Exception { assertThat(role, notNullValue()); assertThat(role.names(), equalTo(new String[] { "valid_role" })); - assertThat(entries, hasSize(6)); + assertThat(entries, hasSize(7)); assertThat( entries.get(0), startsWith("invalid role definition [fóóbár] in roles file [" + path.toAbsolutePath() + "]. 
invalid role name") @@ -627,6 +627,7 @@ public void testThatInvalidRoleDefinitions() throws Exception { assertThat(entries.get(3), startsWith("failed to parse role [role3]")); assertThat(entries.get(4), startsWith("failed to parse role [role4]")); assertThat(entries.get(5), startsWith("failed to parse indices privileges for role [role5]")); + assertThat(entries.get(6), startsWith("failed to parse role [role6]. unexpected field [restriction]")); } public void testThatRoleNamesDoesNotResolvePermissions() throws Exception { @@ -635,8 +636,8 @@ public void testThatRoleNamesDoesNotResolvePermissions() throws Exception { List events = CapturingLogger.output(logger.getName(), Level.ERROR); events.clear(); Set roleNames = FileRolesStore.parseFileForRoleNames(path, logger); - assertThat(roleNames.size(), is(6)); - assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4", "role5")); + assertThat(roleNames.size(), is(7)); + assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4", "role5", "role6")); assertThat(events, hasSize(1)); assertThat( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index e8b5824507c36..9cdcf474c1b7e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.security.authz.store; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; @@ -47,12 +49,14 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests; +import org.elasticsearch.xpack.core.security.authz.RoleRestrictionTests; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.support.SecuritySystemIndices; import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.junit.After; import org.junit.Before; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import java.io.IOException; @@ -70,7 +74,9 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class NativeRolesStoreTests extends ESTestCase { @@ -119,7 +125,8 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { generateRandomStringArray(5, randomIntBetween(2, 8), true, true), RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, - TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null + TcpTransport.isUntrustedRemoteClusterEnabled() ? 
RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null, + null ); assertFalse(flsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -134,7 +141,8 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { generateRandomStringArray(5, randomIntBetween(2, 8), true, true), RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, - TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null + TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null, + null ); assertFalse(dlsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -154,7 +162,8 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { generateRandomStringArray(5, randomIntBetween(2, 8), true, true), RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, - TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null + TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null, + null ); assertFalse(flsDlsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -167,7 +176,8 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { generateRandomStringArray(5, randomIntBetween(2, 8), false, true), RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), null, - TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null + TcpTransport.isUntrustedRemoteClusterEnabled() ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null, + null ); assertFalse(noFlsDlsRole.getTransientMetadata().containsKey("unlicensed_features")); @@ -235,6 +245,61 @@ public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { assertThat(role, equalTo(noFlsDlsRole)); } + public void testTransformingRoleWithRestrictionFails() throws IOException { + MockLicenseState licenseState = mock(MockLicenseState.class); + when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(false); + RoleDescriptor roleWithRestriction = new RoleDescriptor( + "role_with_restriction", + randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), + new IndicesPrivileges[] { + IndicesPrivileges.builder() + .privileges("READ") + .indices(generateRandomStringArray(5, randomIntBetween(3, 9), false, false)) + .grantedFields("*") + .deniedFields(generateRandomStringArray(5, randomIntBetween(3, 9), false, false)) + .query( + randomBoolean() + ? "{ \"term\": { \"" + + randomAlphaOfLengthBetween(3, 24) + + "\" : \"" + + randomAlphaOfLengthBetween(3, 24) + + "\" }" + : "{ \"match_all\": {} }" + ) + .build() }, + RoleDescriptorTests.randomApplicationPrivileges(), + RoleDescriptorTests.randomClusterPrivileges(), + generateRandomStringArray(5, randomIntBetween(2, 8), true, true), + RoleDescriptorTests.randomRoleDescriptorMetadata(ESTestCase.randomBoolean()), + null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? 
RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 2) : null, + RoleRestrictionTests.randomWorkflowsRestriction(1, 2) + ); + + XContentBuilder builder = roleWithRestriction.toXContent( + XContentBuilder.builder(XContentType.JSON.xContent()), + ToXContent.EMPTY_PARAMS + ); + + Logger mockedLogger = Mockito.mock(Logger.class); + BytesReference bytes = BytesReference.bytes(builder); + RoleDescriptor transformedRole = NativeRolesStore.transformRole( + RoleDescriptor.ROLE_TYPE + "-role_with_restriction", + bytes, + mockedLogger, + licenseState + ); + assertThat(transformedRole, nullValue()); + ArgumentCaptor exceptionCaptor = ArgumentCaptor.forClass(ElasticsearchParseException.class); + ArgumentCaptor messageCaptor = ArgumentCaptor.forClass(String.class); + verify(mockedLogger).error(messageCaptor.capture(), exceptionCaptor.capture()); + assertThat(messageCaptor.getValue(), containsString("error in the format of data for role [role_with_restriction]")); + assertThat( + exceptionCaptor.getValue().getMessage(), + containsString("failed to parse role [role_with_restriction]. unexpected field [restriction]") + ); + } + public void testPutOfRoleWithFlsDlsUnlicensed() throws IOException { final Client client = mock(Client.class); final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(TransportVersion.CURRENT); @@ -357,7 +422,8 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final null, null, new RoleDescriptor.RemoteIndicesPrivileges[] { - RoleDescriptor.RemoteIndicesPrivileges.builder("remote").privileges("read").indices("index").build() } + RoleDescriptor.RemoteIndicesPrivileges.builder("remote").privileges("read").indices("index").build() }, + null ); PlainActionFuture future = new PlainActionFuture<>(); rolesStore.putRole(putRoleRequest, remoteIndicesRole, future); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java index d971e06f09481..d722eae69f883 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java @@ -7,11 +7,14 @@ package org.elasticsearch.xpack.security.rest.action.apikey; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.MockLicenseState; +import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -20,10 +23,12 @@ import org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyRequest; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.security.Security; import org.mockito.ArgumentCaptor; import java.util.List; +import static org.hamcrest.Matchers.containsString; 
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -31,9 +36,21 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RestCreateCrossClusterApiKeyActionTests extends ESTestCase { + private MockLicenseState licenseState; + private RestCreateCrossClusterApiKeyAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(true); + action = new RestCreateCrossClusterApiKeyAction(Settings.EMPTY, licenseState); + } + public void testCreateApiKeyRequestHasTypeOfCrossCluster() throws Exception { final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray(""" { @@ -49,7 +66,6 @@ public void testCreateApiKeyRequestHasTypeOfCrossCluster() throws Exception { } }"""), XContentType.JSON).build(); - final var action = new RestCreateCrossClusterApiKeyAction(Settings.EMPTY, mock(XPackLicenseState.class)); final NodeClient client = mock(NodeClient.class); action.handleRequest(restRequest, mock(RestChannel.class), client); @@ -80,4 +96,39 @@ public void testCreateApiKeyRequestHasTypeOfCrossCluster() throws Exception { ); assertThat(request.getMetadata(), nullValue()); } + + public void testLicenseEnforcement() throws Exception { + // Disallow by license + when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(false); + + final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray(""" + { + "name": "my-key", + "access": { + "search": [ + { + "names": [ + "logs" + ] + } + ] + } + }"""), XContentType.JSON).build(); + final SetOnce responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + action.handleRequest(restRequest, restChannel, mock(NodeClient.class)); + + final RestResponse restResponse = responseSetOnce.get(); + assertThat(restResponse.status().getStatus(), equalTo(403)); + assertThat( + restResponse.content().utf8ToString(), + containsString("current license is non-compliant for [advanced-remote-cluster-security]") + ); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java index fcedb5fa5e6da..f9fa9269c4ef1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java @@ -7,12 +7,15 @@ package org.elasticsearch.xpack.security.rest.action.apikey; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.XPackLicenseState; +import 
org.elasticsearch.license.MockLicenseState; +import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -21,12 +24,14 @@ import org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder; import org.elasticsearch.xpack.core.security.action.apikey.UpdateCrossClusterApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.UpdateCrossClusterApiKeyRequest; +import org.elasticsearch.xpack.security.Security; import org.mockito.ArgumentCaptor; import java.util.List; import java.util.Map; import static org.elasticsearch.xpack.core.security.action.apikey.CreateCrossClusterApiKeyRequestTests.randomCrossClusterApiKeyAccessField; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -34,9 +39,21 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RestUpdateCrossClusterApiKeyActionTests extends ESTestCase { + private MockLicenseState licenseState; + private RestUpdateCrossClusterApiKeyAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(true); + action = new RestUpdateCrossClusterApiKeyAction(Settings.EMPTY, licenseState); + } + public void testUpdateHasTypeOfCrossCluster() throws Exception { final String id = randomAlphaOfLength(10); final String access = randomCrossClusterApiKeyAccessField(); @@ -49,7 +66,6 @@ public void testUpdateHasTypeOfCrossCluster() throws Exception { XContentType.JSON ).withParams(Map.of("id", id)).build(); - final var action = new RestUpdateCrossClusterApiKeyAction(Settings.EMPTY, mock(XPackLicenseState.class)); final NodeClient client = mock(NodeClient.class); action.handleRequest(restRequest, mock(RestChannel.class), client); @@ -68,4 +84,33 @@ public void testUpdateHasTypeOfCrossCluster() throws Exception { assertThat(request.getMetadata(), nullValue()); } } + + public void testLicenseEnforcement() throws Exception { + // Disallow by license + when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(false); + + final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent( + new BytesArray(""" + { + "metadata": {} + }"""), + XContentType.JSON + ).withParams(Map.of("id", randomAlphaOfLength(10))).build(); + final SetOnce responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + action.handleRequest(restRequest, restChannel, mock(NodeClient.class)); + + final RestResponse restResponse = responseSetOnce.get(); + assertThat(restResponse.status().getStatus(), equalTo(403)); + assertThat( + restResponse.content().utf8ToString(), + containsString("current license is non-compliant for [advanced-remote-cluster-security]") + ); + } } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index e0a58d031b8d9..4aea7b1f97fce 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -79,6 +79,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; @@ -429,7 +430,9 @@ public void testSetUserBasedOnActionOrigin() { TRANSFORM_ORIGIN, InternalUsers.XPACK_USER, ASYNC_SEARCH_ORIGIN, - InternalUsers.ASYNC_SEARCH_USER + InternalUsers.ASYNC_SEARCH_USER, + DLM_ORIGIN, + InternalUsers.DLM_USER ); final String origin = randomFrom(originToUserMap.keySet()); @@ -680,6 +683,7 @@ public void testSendWithCrossClusterAccessHeadersForSystemUserRegularAction() th } public void testSendWithCrossClusterAccessHeadersForSystemUserCcrInternalAction() throws Exception { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); final String action = randomFrom( "internal:admin/ccr/restore/session/put", "internal:admin/ccr/restore/session/clear", @@ -695,6 +699,7 @@ public void testSendWithCrossClusterAccessHeadersForSystemUserCcrInternalAction( } public void testSendWithCrossClusterAccessHeadersForRegularUserRegularAction() throws Exception { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); final Authentication authentication = randomValueOtherThanMany( authc -> authc.getAuthenticationType() == Authentication.AuthenticationType.INTERNAL, () -> AuthenticationTestHelper.builder().build() @@ -705,6 +710,7 @@ public void testSendWithCrossClusterAccessHeadersForRegularUserRegularAction() t } public void testSendWithCrossClusterAccessHeadersForRegularUserClusterStateAction() throws Exception { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); final Authentication authentication = randomValueOtherThanMany( authc -> authc.getAuthenticationType() == Authentication.AuthenticationType.INTERNAL, () -> AuthenticationTestHelper.builder().build() diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java index 9c23b39a4dc6e..000a8a90b14bc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; import 
org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -681,4 +682,170 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th testThreadPool.shutdownNow(); } } + + public void testOptionsRequestsFailWith400AndNoAuthn() throws Exception { + final Settings settings = Settings.builder().put(env.settings()).build(); + AtomicReference badRequestCauseReference = new AtomicReference<>(); + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected dispatched request [" + FakeRestRequest.requestToString(channel.request()) + "]"); + throw new AssertionError("Unexpected dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + badRequestCauseReference.set(cause); + } + }; + final ThreadPool testThreadPool = new TestThreadPool(TEST_MOCK_TRANSPORT_THREAD_PREFIX); + try ( + Netty4HttpServerTransport transport = Security.getHttpServerTransportWithHeadersValidator( + settings, + new NetworkService(List.of()), + testThreadPool, + xContentRegistry(), + dispatcher, + randomClusterSettings(), + new SharedGroupFactory(settings), + Tracer.NOOP, + TLSConfig.noTLS(), + null, + (httpPreRequest, channel, listener) -> { + throw new AssertionError("should not be invoked for OPTIONS requests"); + }, + (httpPreRequest, channel, listener) -> { + throw new AssertionError("should not be invoked for OPTIONS requests with a body"); + } + ) + ) { + final ChannelHandler handler = transport.configureServerChannelHandler(); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + // OPTIONS request with fixed length content written in one chunk + { + ByteBuf buf = ch.alloc().buffer(); + ByteBufUtil.copy(AsciiString.of("OPTIONS /url/whatever/fixed-length-single-chunk HTTP/1.1"), buf); + buf.writeByte(HttpConstants.LF); + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Host: localhost"), buf); + buf.writeByte(HttpConstants.LF); + } + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Accept: */*"), buf); + buf.writeByte(HttpConstants.LF); + } + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Content-Encoding: gzip"), buf); + buf.writeByte(HttpConstants.LF); + } + if (randomBoolean()) { + ByteBufUtil.copy( + AsciiString.of("Content-Type: " + randomFrom("text/plain; charset=utf-8", "application/json; charset=utf-8")), + buf + ); + buf.writeByte(HttpConstants.LF); + } + String content = randomAlphaOfLengthBetween(4, 1024); + // having a "Content-Length" request header is what makes it "fixed length" + ByteBufUtil.copy(AsciiString.of("Content-Length: " + content.length()), buf); + buf.writeByte(HttpConstants.LF); + // end of headers + buf.writeByte(HttpConstants.LF); + ByteBufUtil.copy(AsciiString.of(content), buf); + // write everything in one single chunk + testThreadPool.generic().submit(() -> { + ch.writeInbound(buf); + ch.flushInbound(); + }).get(); + ch.runPendingTasks(); + Throwable badRequestCause = badRequestCauseReference.get(); + assertThat(badRequestCause, instanceOf(HttpHeadersValidationException.class)); + assertThat(badRequestCause.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(((ElasticsearchException) badRequestCause.getCause()).status(), 
is(RestStatus.BAD_REQUEST)); + assertThat( + ((ElasticsearchException) badRequestCause.getCause()).getDetailedMessage(), + containsString("OPTIONS requests with a payload body are not supported") + ); + } + { + ByteBuf buf = ch.alloc().buffer(); + ByteBufUtil.copy(AsciiString.of("OPTIONS /url/whatever/chunked-transfer?encoding HTTP/1.1"), buf); + buf.writeByte(HttpConstants.LF); + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Host: localhost"), buf); + buf.writeByte(HttpConstants.LF); + } + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Accept: */*"), buf); + buf.writeByte(HttpConstants.LF); + } + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Content-Encoding: gzip"), buf); + buf.writeByte(HttpConstants.LF); + } + if (randomBoolean()) { + ByteBufUtil.copy( + AsciiString.of("Content-Type: " + randomFrom("text/plain; charset=utf-8", "application/json; charset=utf-8")), + buf + ); + buf.writeByte(HttpConstants.LF); + } + // do not write a "Content-Length" header to make the request "variable length" + if (randomBoolean()) { + ByteBufUtil.copy(AsciiString.of("Transfer-Encoding: " + randomFrom("chunked", "gzip, chunked")), buf); + } else { + ByteBufUtil.copy(AsciiString.of("Transfer-Encoding: chunked"), buf); + } + buf.writeByte(HttpConstants.LF); + buf.writeByte(HttpConstants.LF); + // maybe append some chunks as well + String[] contentParts = randomArray(0, 4, String[]::new, () -> randomAlphaOfLengthBetween(1, 64)); + for (String content : contentParts) { + ByteBufUtil.copy(AsciiString.of(Integer.toHexString(content.length())), buf); + buf.writeByte(HttpConstants.CR); + buf.writeByte(HttpConstants.LF); + ByteBufUtil.copy(AsciiString.of(content), buf); + buf.writeByte(HttpConstants.CR); + buf.writeByte(HttpConstants.LF); + } + testThreadPool.generic().submit(() -> { + ch.writeInbound(buf); + ch.flushInbound(); + }).get(); + // append some more chunks as well + ByteBuf buf2 = ch.alloc().buffer(); + contentParts = randomArray(1, 4, String[]::new, () -> randomAlphaOfLengthBetween(1, 64)); + for (String content : contentParts) { + ByteBufUtil.copy(AsciiString.of(Integer.toHexString(content.length())), buf2); + buf2.writeByte(HttpConstants.CR); + buf2.writeByte(HttpConstants.LF); + ByteBufUtil.copy(AsciiString.of(content), buf2); + buf2.writeByte(HttpConstants.CR); + buf2.writeByte(HttpConstants.LF); + } + // finish chunked request + ByteBufUtil.copy(AsciiString.of("0"), buf2); + buf2.writeByte(HttpConstants.CR); + buf2.writeByte(HttpConstants.LF); + buf2.writeByte(HttpConstants.CR); + buf2.writeByte(HttpConstants.LF); + testThreadPool.generic().submit(() -> { + ch.writeInbound(buf2); + ch.flushInbound(); + }).get(); + ch.runPendingTasks(); + Throwable badRequestCause = badRequestCauseReference.get(); + assertThat(badRequestCause, instanceOf(HttpHeadersValidationException.class)); + assertThat(badRequestCause.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(((ElasticsearchException) badRequestCause.getCause()).status(), is(RestStatus.BAD_REQUEST)); + assertThat( + ((ElasticsearchException) badRequestCause.getCause()).getDetailedMessage(), + containsString("OPTIONS requests with a payload body are not supported") + ); + } + } finally { + testThreadPool.shutdownNow(); + } + } + } diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml index 1bef00f737185..21e9d87189cf0 100644 --- 
a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml @@ -45,3 +45,16 @@ role5: - names: - 'idx1' privileges: [] + +# role includes unsupported workflows restriction +role6: + cluster: + - ALL + indices: + - names: idx + privileges: + - ALL + restriction: + workflows: + - workflow1 + - workflow2 diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java index eadfbd167297d..3996707d7552f 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java @@ -26,7 +26,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.elasticsearch.cluster.metadata.NodesShutdownMetadata.getShutdownsOrEmpty; import static org.elasticsearch.core.Strings.format; /** @@ -72,7 +71,7 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - final Set nodesNotPreviouslySeen = eventShutdownMetadata.getAllNodeMetadataMap() + final Set nodesNotPreviouslySeen = eventShutdownMetadata.getAll() .values() .stream() .filter(singleNodeShutdownMetadata -> singleNodeShutdownMetadata.getNodeSeen() == false) @@ -97,7 +96,7 @@ private static class SetSeenNodesShutdownExecutor implements ClusterStateTaskExe @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { final var initialState = batchExecutionContext.initialState(); - var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(initialState).getAllNodeMetadataMap()); + var shutdownMetadata = new HashMap<>(initialState.metadata().nodeShutdowns().getAll()); var nodesNotPreviouslySeen = new HashSet<>(); for (final var taskContext : batchExecutionContext.taskContexts()) { @@ -112,7 +111,7 @@ public ClusterState execute(BatchExecutionContext batc return v; }); - if (shutdownMetadata.equals(getShutdownsOrEmpty(initialState).getAllNodeMetadataMap())) { + if (shutdownMetadata.equals(initialState.metadata().nodeShutdowns().getAll())) { return initialState; } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java index 04c74ed1283d3..15250f0d8f6f8 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java @@ -36,8 +36,6 @@ import java.util.HashMap; import java.util.Map; -import static org.elasticsearch.cluster.metadata.NodesShutdownMetadata.getShutdownsOrEmpty; - public class TransportDeleteShutdownNodeAction extends AcknowledgedTransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportDeleteShutdownNodeAction.class); @@ -80,7 +78,7 @@ public void onFailure(Exception e) { class DeleteShutdownNodeExecutor implements ClusterStateTaskExecutor { @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { - var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(batchExecutionContext.initialState()).getAllNodeMetadataMap()); + var shutdownMetadata 
= new HashMap<>(batchExecutionContext.initialState().metadata().nodeShutdowns().getAll()); boolean changed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { var request = taskContext.getTask().request(); @@ -132,7 +130,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A throws Exception { { // This block solely to ensure this NodesShutdownMetadata isn't accidentally used in the cluster state update task below NodesShutdownMetadata nodesShutdownMetadata = state.metadata().custom(NodesShutdownMetadata.TYPE); - if (nodesShutdownMetadata == null || nodesShutdownMetadata.getAllNodeMetadataMap().get(request.getNodeId()) == null) { + if (nodesShutdownMetadata == null || nodesShutdownMetadata.get(request.getNodeId()) == null) { throw new ResourceNotFoundException("node [" + request.getNodeId() + "] is not currently shutting down"); } } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 5c1fcb26d98be..8fe5a3fa92a8d 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -42,7 +42,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; @@ -107,7 +106,7 @@ protected void masterOperation( if (nodesShutdownMetadata == null) { response = new GetShutdownStatusAction.Response(new ArrayList<>()); } else if (request.getNodeIds().length == 0) { - final List shutdownStatuses = nodesShutdownMetadata.getAllNodeMetadataMap() + final List shutdownStatuses = nodesShutdownMetadata.getAll() .values() .stream() .map( @@ -131,9 +130,8 @@ protected void masterOperation( response = new GetShutdownStatusAction.Response(shutdownStatuses); } else { new ArrayList<>(); - final Map nodeShutdownMetadataMap = nodesShutdownMetadata.getAllNodeMetadataMap(); final List shutdownStatuses = Arrays.stream(request.getNodeIds()) - .map(nodeShutdownMetadataMap::get) + .map(nodesShutdownMetadata::get) .filter(Objects::nonNull) .map( ns -> new SingleNodeShutdownStatus( @@ -199,7 +197,7 @@ static ShutdownShardMigrationStatus shardMigrationStatus( allocation.setDebugMode(RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS); // We also need the set of node IDs which are currently shutting down. 
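The shutdown hunks here and below all make the same mechanical swap: the static NodesShutdownMetadata.getShutdownsOrEmpty(state) helper and getAllNodeMetadataMap() give way to state.metadata().nodeShutdowns() combined with getAll() and get(nodeId). A compact sketch of the new call shape, with signatures inferred from the call sites in this patch (nodeShutdowns() is assumed to be non-null, since none of the migrated callers guard it):

    import java.util.Map;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
    import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;

    final class ShutdownLookupSketch {
        // Replaces getAllNodeMetadataMap().get(nodeId): null means the node
        // is not registered for shutdown.
        static boolean isShuttingDown(ClusterState state, String nodeId) {
            NodesShutdownMetadata shutdowns = state.metadata().nodeShutdowns();
            return shutdowns.get(nodeId) != null;
        }

        // Replaces getAllNodeMetadataMap(): the full map keyed by node id.
        static Map<String, SingleNodeShutdownMetadata> allShutdowns(ClusterState state) {
            return state.metadata().nodeShutdowns().getAll();
        }
    }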
- Set<String> shuttingDownNodes = currentState.metadata().nodeShutdowns().keySet(); + Set<String> shuttingDownNodes = currentState.metadata().nodeShutdowns().getAll().keySet(); // Check if we have any unassigned primary shards that have this nodeId as their lastAllocatedNodeId var unassignedShards = currentState.getRoutingNodes() diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java index cd7557b9e223d..fff5f43d535cc 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java @@ -37,8 +37,6 @@ import java.util.Objects; import java.util.function.Predicate; -import static org.elasticsearch.cluster.metadata.NodesShutdownMetadata.getShutdownsOrEmpty; - public class TransportPutShutdownNodeAction extends AcknowledgedTransportMasterNodeAction<Request> { private static final Logger logger = LogManager.getLogger(TransportPutShutdownNodeAction.class); @@ -121,7 +119,7 @@ class PutShutdownNodeExecutor implements ClusterStateTaskExecutor<PutShutdownNodeTask> @Override public ClusterState execute(BatchExecutionContext<PutShutdownNodeTask> batchExecutionContext) throws Exception { final var initialState = batchExecutionContext.initialState(); - var shutdownMetadata = new HashMap<>(getShutdownsOrEmpty(initialState).getAllNodeMetadataMap()); + var shutdownMetadata = new HashMap<>(initialState.metadata().nodeShutdowns().getAll()); Predicate<String> nodeExistsPredicate = batchExecutionContext.initialState().getNodes()::nodeExists; boolean changed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { @@ -171,7 +169,7 @@ public TransportPutShutdownNodeAction( @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) { - if (isNoop(getShutdownsOrEmpty(state).getAllNodeMetadataMap(), request)) { + if (isNoop(state.getMetadata().nodeShutdowns().getAll(), request)) { listener.onResponse(AcknowledgedResponse.TRUE); return; } diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index 00efa6d60a684..a9ec02a295887 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -70,6 +70,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.junit.After; @@ -662,6 +663,114 @@ public void testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96427") + public void testCancelledRecoveryAbortsDownloadPromptly() throws Exception { + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), "1"); +
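(The body of the new test continues below. As a reading aid, here is a condensed, hypothetical sketch of the latch choreography it relies on: the first blob read signals the test and then parks, the test deletes the index while the read is parked, and only then is the read released, so a prompt abort can be observed deterministically. All names here are invented for illustration:

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.concurrent.CountDownLatch;

    class GatedStream extends FilterInputStream {
        static final CountDownLatch readCalled = new CountDownLatch(1);  // download reached the blob
        static final CountDownLatch mayProceed = new CountDownLatch(1);  // test has deleted the index

        GatedStream(InputStream in) {
            super(in);
        }

        @Override
        public int read() throws IOException {
            readCalled.countDown();     // tell the test the download has started
            try {
                mayProceed.await();     // park until the test releases us
            } catch (InterruptedException e) {
                throw new AssertionError(e);
            }
            return super.read();        // the real test asserts no reads happen after cancellation
        }
    }

The actual test additionally intercepts START_RECOVERY responses so the source node cannot retry before the deletion is applied on the target.)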
try { + internalCluster().ensureAtLeastNumDataNodes(2); + + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex( + indexName, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + ensureGreen(indexName); + + int numDocs = randomIntBetween(1, 1000); + indexDocs(indexName, numDocs, numDocs); + + String repoName = "repo"; + createRepo(repoName, TestRepositoryPlugin.FILTER_TYPE); + createSnapshot(repoName, "snap", Collections.singletonList(indexName)); + + final AtomicBoolean isCancelled = new AtomicBoolean(); + final CountDownLatch readFromBlobCalledLatch = new CountDownLatch(1); + final CountDownLatch readFromBlobRespondLatch = new CountDownLatch(1); + + internalCluster().getInstances(TransportService.class) + .forEach( + transportService -> ((MockTransportService) transportService).addRequestHandlingBehavior( + PeerRecoverySourceService.Actions.START_RECOVERY, + (handler, request, channel, task) -> handler.messageReceived(request, new TransportChannel() { + @Override + public String getProfileName() { + return channel.getProfileName(); + } + + @Override + public String getChannelType() { + return channel.getChannelType(); + } + + @Override + public void sendResponse(TransportResponse response) { + fail("recovery should not succeed"); + } + + @Override + public void sendResponse(Exception exception) { + // Must not respond until the index deletion is applied on the target node, or else it will get an + // IllegalIndexShardStateException which it considers to be retryable, and will reset the recovery and + // generate a new `CancellableThreads` which is cancelled instead of the original `CancellableThreads`, + // permitting a subsequent read. + transportService.getThreadPool().generic().execute(() -> { + safeAwait(readFromBlobRespondLatch); + try { + channel.sendResponse(exception); + } catch (IOException e) { + throw new AssertionError("unexpected", e); + } + }); + } + }, task) + ) + ); + + FilterFsRepository.wrapReadBlobMethod((blobName, stream) -> { + if (blobName.startsWith("__")) { + return new FilterInputStream(stream) { + @Override + public int read() throws IOException { + beforeRead(); + return super.read(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + beforeRead(); + return super.read(b, off, len); + } + + private void beforeRead() { + assertFalse(isCancelled.get()); // should have no further reads once the index is deleted + readFromBlobCalledLatch.countDown(); + safeAwait(readFromBlobRespondLatch); + } + }; + } else { + return stream; + } + }); + + updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1), indexName); + safeAwait(readFromBlobCalledLatch); + + assertAcked(client().admin().indices().prepareDelete(indexName).get()); + // cancellation flag is set when applying the cluster state that deletes the index, so no further waiting is necessary + isCancelled.set(true); + readFromBlobRespondLatch.countDown(); + + assertThat(indexExists(indexName), is(equalTo(false))); + assertBusy( + () -> internalCluster().getInstances(PeerRecoveryTargetService.class) + .forEach(peerRecoveryTargetService -> assertEquals(0, peerRecoveryTargetService.ongoingRecoveryCount())) + ); + } finally { + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), null); + } + } + public void testRecoveryAfterRestoreUsesSnapshots() throws Exception { String indexName = 
randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createIndex( diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java index e1a100d3e7023..cb877c7a285ff 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java @@ -19,7 +19,7 @@ */ class ParsedMediaType { // tchar pattern as defined by RFC7230 section 3.2.6 - private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+"); + private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+"); private final String originalHeaderValue; private final String type; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml new file mode 100644 index 0000000000000..6b1f5bc764151 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/50_cross_cluster.yml @@ -0,0 +1,343 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + security.put_role: + name: "admin_role" + body: > + { + "cluster": ["manage_security"] + } + + - do: + security.put_user: + username: "admin_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "admin_role" ], + "full_name" : "Admin user" + } + +--- +teardown: + - do: + security.delete_role: + name: "admin_role" + ignore: 404 + + - do: + security.delete_user: + username: "admin_user" + ignore: 404 + +--- +"Test create a cross-cluster API key": + - skip: + features: transform_and_set + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.create_cross_cluster_api_key: + body: > + { + "name": "my-cc-api-key", + "expiration": "1d", + "access": { + "search": [ + { + "names": ["logs*"], + "query": { + "term": { "category": "shared" } + }, + "field_security": { + "grant": ["*"], + "except": ["private"] + } + } + ], + "replication": [ + { + "names": ["archive"], + "allow_restricted_indices": false + } + ] + }, + "metadata": { + "answer": 42, + "tag": "dev" + } + } + - match: { name: "my-cc-api-key" } + - is_true: id + - is_true: api_key + - is_true: expiration + - set: { id: api_key_id } + - transform_and_set: { login_creds: "#base64EncodeCredentials(id,api_key)" } + - match: { encoded: $login_creds } + + # Authenticate with it via the REST interface should fail + - do: + catch: unauthorized + headers: + Authorization: ApiKey ${login_creds} + security.authenticate: { } + + - match: { "error.type": "security_exception" } + - match: + "error.reason": "authentication expected API key type of [rest], but API key [${api_key_id}] has type [cross_cluster]" + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.get_api_key: + id: "$api_key_id" + with_limited_by: true + + - length: { "api_keys": 1 } + - match: { "api_keys.0.id": "$api_key_id" } + - match: { "api_keys.0.name": "my-cc-api-key" } + - match: { "api_keys.0.type": "cross_cluster" } + - is_false: api_keys.0.invalidated + - match: { "api_keys.0.metadata": { "answer": 42, "tag": "dev" } } + - match: { "api_keys.0.role_descriptors": { + 
"cross_cluster": { + "cluster": [ + "cross_cluster_search", + "cross_cluster_replication" + ], + "indices": [ + { + "names": [ + "logs*" + ], + "privileges": [ + "read", + "read_cross_cluster", + "view_index_metadata" + ], + "field_security": { + "grant": [ + "*" + ], + "except": [ + "private" + ] + }, + "query": "{\"term\":{\"category\":\"shared\"}}", + "allow_restricted_indices": false + }, + { + "names": [ + "archive" + ], + "privileges": [ + "cross_cluster_replication", + "cross_cluster_replication_internal" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + - is_false: api_keys.0.limited_by + +--- +"Test update a cross-cluster API Key": + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.create_cross_cluster_api_key: + body: > + { + "name": "my-cc-api-key", + "access": { + "search": [ + { + "names": ["logs*"] + } + ] + }, + "metadata": { "tag": "dev" } + } + - is_true: id + - set: { id: api_key_id } + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.update_cross_cluster_api_key: + id: "$api_key_id" + body: > + { + "access": { + "replication": [ + { + "names": ["archive"] + } + ] + }, + "metadata": { "tag": "prod" } + } + - match: { updated: true } + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.get_api_key: + id: "$api_key_id" + with_limited_by: true + + - length: { "api_keys": 1 } + - match: { "api_keys.0.id": "$api_key_id" } + - match: { "api_keys.0.name": "my-cc-api-key" } + - match: { "api_keys.0.type": "cross_cluster" } + - is_false: api_keys.0.invalidated + - match: { "api_keys.0.metadata": { "tag": "prod" } } + - match: { "api_keys.0.role_descriptors": { + "cross_cluster": { + "cluster": [ + "cross_cluster_replication" + ], + "indices": [ + { + "names": [ + "archive" + ], + "privileges": [ + "cross_cluster_replication", + "cross_cluster_replication_internal" + ], + "allow_restricted_indices": false + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + - is_false: api_keys.0.limited_by + + # No-op update + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.update_cross_cluster_api_key: + id: "$api_key_id" + body: > + { + "access": { + "replication": [ + { + "names": ["archive"] + } + ] + }, + "metadata": { "tag": "prod" } + } + - match: { updated: false } + +--- +"Test invalidate a cross-cluster API Key": + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.create_cross_cluster_api_key: + body: > + { + "name": "my-cc-api-key", + "access": { + "search": [ + { + "names": ["*"], + "allow_restricted_indices": true + } + ] + } + } + - is_true: id + - set: { id: api_key_id } + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.get_api_key: + id: "$api_key_id" + + - length: { "api_keys": 1 } + - match: { "api_keys.0.id": "$api_key_id" } + - match: { "api_keys.0.name": "my-cc-api-key" } + - match: { "api_keys.0.type": "cross_cluster" } + - is_false: api_keys.0.invalidated + - match: { "api_keys.0.metadata": { } } + - match: { "api_keys.0.role_descriptors": { + "cross_cluster": { + "cluster": [ + "cross_cluster_search" + ], 
+ "indices": [ + { + "names": [ + "*" + ], + "privileges": [ + "read", + "read_cross_cluster", + "view_index_metadata" + ], + "allow_restricted_indices": true + } + ], + "applications": [ ], + "run_as": [ ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } + } + } + - is_false: api_keys.0.limited_by + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.invalidate_api_key: + body: > + { + "ids": [ "${api_key_id}" ] + } + - length: { "invalidated_api_keys": 1 } + - match: { "invalidated_api_keys.0": "${api_key_id}" } + - length: { "previously_invalidated_api_keys": 0 } + - match: { "error_count": 0 } + + - do: + headers: + Authorization: "Basic YWRtaW5fdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # admin_user + security.get_api_key: + id: "$api_key_id" + + - length: { "api_keys": 1 } + - match: { "api_keys.0.id": "$api_key_id" } + - is_true: api_keys.0.invalidated diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml new file mode 100644 index 0000000000000..bad01b09b896b --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/dlm/10_usage.yml @@ -0,0 +1,81 @@ +--- +"Test DLM usage stats": + - skip: + version: "- 8.8.99" + reason: "the dlm stats were only added to the usage api in 8.9" + + - do: + xpack.usage: {} + + - match: { data_lifecycle.available: true } + - match: { data_lifecycle.enabled: true } + - match: { data_lifecycle.count: 0 } + - match: { data_lifecycle.default_rollover_used: true } + - match: { data_lifecycle.retention.minimum_millis: 0 } + - match: { data_lifecycle.retention.maximum_millis: 0 } + - match: { data_lifecycle.retention.average_millis: 0 } + + - do: + indices.put_index_template: + name: my-template-1 + body: + index_patterns: [foo-*] + template: + mappings: + properties: + '@timestamp': + type: date + lifecycle: + data_retention: 10d + data_stream: {} + + - do: + indices.create_data_stream: + name: foo-foobar + - is_true: acknowledged + + - do: + indices.put_index_template: + name: my-template-2 + body: + index_patterns: [bar-*] + template: + mappings: + properties: + '@timestamp': + type: date + lifecycle: + data_retention: 5d + data_stream: {} + + - do: + indices.create_data_stream: + name: bar-foobar + - is_true: acknowledged + + - do: + xpack.usage: {} + + - match: { data_lifecycle.available: true } + - match: { data_lifecycle.enabled: true } + - match: { data_lifecycle.count: 2 } + - match: { data_lifecycle.default_rollover_used: true } + - match: { data_lifecycle.retention.minimum_millis: 432000000 } + - match: { data_lifecycle.retention.maximum_millis: 864000000 } + - match: { data_lifecycle.retention.average_millis: 648000000 } + + - do: + indices.delete_data_stream: + name: foo-foobar + - is_true: acknowledged + + - do: + xpack.usage: {} + + - match: { data_lifecycle.available: true } + - match: { data_lifecycle.enabled: true } + - match: { data_lifecycle.count: 1 } + - match: { data_lifecycle.default_rollover_used: true } + - match: { data_lifecycle.retention.minimum_millis: 432000000 } + - match: { data_lifecycle.retention.maximum_millis: 432000000 } + - match: { data_lifecycle.retention.average_millis: 432000000 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml index 7c43834e70ae9..e17d429e35248 100644 
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml @@ -16,4 +16,4 @@ setup: # I would much prefer we could just check that specific entries are in the array, but we don't have # an assertion for that - length: { "cluster" : 48 } - - length: { "index" : 21 } + - length: { "index" : 22 } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index 2cb96550256ba..cbe233f0d911d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -68,8 +68,6 @@ public class WatcherScheduleEngineBenchmark { .build(); public static void main(String[] args) throws Exception { - System.setProperty("es.logger.prefix", ""); - String[] engines = new String[] { "ticker", "scheduler" }; int numWatches = 2000; int benchTime = 60000; @@ -211,7 +209,7 @@ public void run() { NodesStatsResponse response = client.admin().cluster().prepareNodesStats().setThreadPool(true).get(); for (NodeStats nodeStats : response.getNodes()) { for (ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { - if ("watcher".equals(threadPoolStats.getName())) { + if ("watcher".equals(threadPoolStats.name())) { stats.setWatcherThreadPoolStats(threadPoolStats); } } @@ -347,8 +345,8 @@ public void printThreadStats() throws IOException { "%10s | %13s | %12d | %13d \n", name, ByteSizeValue.ofBytes(avgHeapUsed), - watcherThreadPoolStats.getRejected(), - watcherThreadPoolStats.getCompleted() + watcherThreadPoolStats.rejected(), + watcherThreadPoolStats.completed() ); } diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/BinaryDvConfirmedAutomatonQuery.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/BinaryDvConfirmedAutomatonQuery.java index f2836e10e4ed8..608e5f1972373 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/BinaryDvConfirmedAutomatonQuery.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/BinaryDvConfirmedAutomatonQuery.java @@ -9,7 +9,6 @@ import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; @@ -55,8 +54,8 @@ private BinaryDvConfirmedAutomatonQuery(Query approximation, String field, Strin } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query approxRewrite = approxQuery.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query approxRewrite = approxQuery.rewrite(searcher); if (approxQuery != approxRewrite) { return new BinaryDvConfirmedAutomatonQuery(approxRewrite, field, matchPattern, bytesMatcher); } diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index f963c55f6602f..b9133ff3bce73 100644 --- 
a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -259,7 +259,7 @@ public void testTooBigQueryField() throws IOException { iw.close(); // Test wildcard query - String queryString = randomABString((BooleanQuery.getMaxClauseCount() * 2) + 1); + String queryString = randomABString((IndexSearcher.getMaxClauseCount() * 2) + 1); Query wildcardFieldQuery = wildcardFieldType.fieldType().wildcardQuery(queryString, null, null); TopDocs wildcardFieldTopDocs = searcher.search(wildcardFieldQuery, 10, Sort.INDEXORDER); assertThat(wildcardFieldTopDocs.totalHits.value, equalTo(0L)); @@ -898,7 +898,7 @@ protected Query getSimplifiedApproximationQuery(Query approximationQuery) throws int numRewrites = 0; int maxNumRewrites = 100; for (; numRewrites < maxNumRewrites; numRewrites++) { - Query newApprox = approximationQuery.rewrite(rewriteReader); + Query newApprox = approximationQuery.rewrite(new IndexSearcher(rewriteReader)); if (newApprox == approximationQuery) { break; } diff --git a/x-pack/qa/mixed-tier-cluster/build.gradle b/x-pack/qa/mixed-tier-cluster/build.gradle index bf05be45e18a0..72a91a1d14d23 100644 --- a/x-pack/qa/mixed-tier-cluster/build.gradle +++ b/x-pack/qa/mixed-tier-cluster/build.gradle @@ -1,6 +1,7 @@ apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.bwc-test' +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -29,6 +30,7 @@ BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") && nodes."${baseName}-1".setting 'node.roles', '["data_content", "data_hot"]' } nodes."${baseName}-2".setting 'node.roles', '["master"]' + requiresFeature 'es.dlm_feature_flag_enabled', Version.fromString("8.8.0") } tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java index 5d06e1ea8f5f4..fafe033597b72 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java @@ -348,7 +348,7 @@ private Map getRestClientByVersion() throws IOException { } private static RoleDescriptor randomRoleDescriptor(boolean includeRemoteIndices) { - final Set<String> excludedPrivileges = Set.of("cross_cluster_replication", "cross_cluster_replication_internal"); + final Set<String> excludedPrivileges = Set.of("cross_cluster_replication", "cross_cluster_replication_internal", "manage_dlm"); return new RoleDescriptor( randomAlphaOfLengthBetween(3, 90), randomSubsetOf(Set.of("all", "monitor", "none")).toArray(String[]::new), @@ -358,7 +358,8 @@ private static RoleDescriptor randomRoleDescriptor(boolean includeRemoteIndices) generateRandomStringArray(5, randomIntBetween(2, 8), false, true), RoleDescriptorTests.randomRoleDescriptorMetadata(false), Map.of(), - includeRemoteIndices ? RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3, excludedPrivileges) : null + includeRemoteIndices ?
RoleDescriptorTests.randomRemoteIndicesPrivileges(1, 3, excludedPrivileges) : null, + null ); } }
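(Closing note: the 11_builtin.yml count bump from 21 to 22 index privileges and the manage_dlm exclusion above appear to be connected. A newly introduced privilege is unknown to nodes still on the old version of a mixed cluster, so randomly generated role descriptors must not reference it or the old side would reject them. A hedged sketch of that filtering step; the helper name and candidate set are invented for illustration:

    import java.util.HashSet;
    import java.util.Set;

    final class BwcPrivileges {
        // Drop privileges the old side of a rolling upgrade cannot parse
        // before building a random RoleDescriptor.
        static Set<String> bwcSafePrivileges(Set<String> candidates) {
            Set<String> excluded = Set.of(
                "cross_cluster_replication",
                "cross_cluster_replication_internal",
                "manage_dlm" // new in this release, unknown to older nodes
            );
            Set<String> safe = new HashSet<>(candidates);
            safe.removeAll(excluded);
            return safe;
        }
    }
)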