diff --git a/compiler-rs/Cargo.lock b/compiler-rs/Cargo.lock index bb56551417..9ca0a2f0eb 100644 --- a/compiler-rs/Cargo.lock +++ b/compiler-rs/Cargo.lock @@ -245,8 +245,8 @@ dependencies = [ "derive_more 2.0.1", "icu_segmenter", "indexmap", + "itertools", "openapiv3", - "serde_ignored", "serde_json", "tracing", "tracing-subscriber", diff --git a/compiler-rs/clients_schema/src/lib.rs b/compiler-rs/clients_schema/src/lib.rs index a4c5841949..8fcc6d57e8 100644 --- a/compiler-rs/clients_schema/src/lib.rs +++ b/compiler-rs/clients_schema/src/lib.rs @@ -264,7 +264,7 @@ pub struct Availability { pub visibility: Option<Visibility>, } -/// The availability of an +/// The availability of an endpoint, field or parameter pub type Availabilities = IndexMap<Flavor, Availability>; pub trait AvailabilityFilter: Fn(&Option<Availabilities>) -> bool {} diff --git a/compiler-rs/clients_schema_to_openapi/Cargo.toml b/compiler-rs/clients_schema_to_openapi/Cargo.toml index 5277450797..727b43ef8b 100644 --- a/compiler-rs/clients_schema_to_openapi/Cargo.toml +++ b/compiler-rs/clients_schema_to_openapi/Cargo.toml @@ -10,7 +10,7 @@ clients_schema = {path="../clients_schema"} argh = { workspace = true } derive_more = { version = "2", features = ["from_str"] } serde_json = { workspace = true } -serde_ignored = { workspace = true } +itertools = { workspace = true } icu_segmenter = { workspace = true } openapiv3 = { workspace = true } anyhow = { workspace = true } diff --git a/compiler-rs/clients_schema_to_openapi/src/paths.rs b/compiler-rs/clients_schema_to_openapi/src/paths.rs index 87292c54dc..c232515526 100644 --- a/compiler-rs/clients_schema_to_openapi/src/paths.rs +++ b/compiler-rs/clients_schema_to_openapi/src/paths.rs @@ -19,10 +19,11 @@ use std::collections::HashMap; use std::fmt::Write; use anyhow::{anyhow, bail}; -use clients_schema::{Property}; +use clients_schema::{Privileges, Property}; use indexmap::IndexMap; use indexmap::indexmap; use icu_segmenter::SentenceSegmenter; +use itertools::Itertools; use openapiv3::{ MediaType, Parameter, ParameterData, ParameterSchemaOrContent, PathItem, PathStyle, Paths, QueryStyle, ReferenceOr, RequestBody, Response, Responses, StatusCode, Example } @@ -253,6 +254,13 @@ pub fn add_endpoint( parameters.append(&mut query_params.clone()); let sum_desc = split_summary_desc(&endpoint.description); + + let privilege_desc = add_privileges(&endpoint.privileges); + + let full_desc = match (sum_desc.description, privilege_desc) { + (Some(a), Some(b)) => Some(a+ &b), + (opt_a, opt_b) => opt_a.or(opt_b) + }; // add the x-state extension for availability let mut extensions = crate::availability_as_extensions(&endpoint.availability, &tac.config.flavor); @@ -300,7 +308,7 @@ pub fn add_endpoint( vec![namespace.to_string()] }, summary: sum_desc.summary, - description: sum_desc.description, + description: full_desc, external_docs: tac.convert_external_docs(endpoint), // external_docs: None, // Need values that differ from client purposes operation_id: None, // set in clone_operation below with operation_counter @@ -444,6 +452,26 @@ fn split_summary_desc(desc: &str) -> SplitDesc{ } } +fn add_privileges(privileges: &Option<Privileges>) -> Option<String>{ + if let Some(privs) = privileges { + let mut result = "\n ##Required authorization\n".to_string(); + if !privs.index.is_empty() { + result += "* Index privileges: "; + result += &privs.index.iter() + .map(|a| format!("`{a}`")) + .join(","); + } + if !privs.cluster.is_empty() { + result += "* Cluster privileges: "; + result += &privs.cluster.iter() + .map(|a| format!("`{a}`")) + .join(","); + } + return
Some(result) + } + None +} + #[derive(PartialEq,Debug)] struct SplitDesc { summary: Option<String>, description: Option<String>, } diff --git a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm index e3b49bc082..9b26532ad4 100644 Binary files a/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm and b/compiler-rs/compiler-wasm-lib/pkg/compiler_wasm_lib_bg.wasm differ diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index ca3d4b476b..26c52f9e09 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -127,7 +127,7 @@ "search" ], "summary": "Get the async search status", - "description": "Get the status of a previously submitted async search request given its identifier, without retrieving search results.\nIf the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:\n\n* The user or API key that submitted the original async search request.\n* Users that have the `monitor` cluster privilege or greater privileges.", + "description": "Get the status of a previously submitted async search request given its identifier, without retrieving search results.\nIf the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:\n\n* The user or API key that submitted the original async search request.\n* Users that have the `monitor` cluster privilege or greater privileges.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "async-search-status", "parameters": [ { @@ -1049,7 +1049,7 @@ "cat" ], "summary": "Get aliases", - "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "cat-aliases", "parameters": [ { @@ -1085,7 +1085,7 @@ "cat" ], "summary": "Get aliases", - "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications.
For application consumption, use the aliases API.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "cat-aliases-1", "parameters": [ { @@ -1124,7 +1124,7 @@ "cat" ], "summary": "Get shard allocation information", - "description": "Get a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-allocation", "parameters": [ { @@ -1163,7 +1163,7 @@ "cat" ], "summary": "Get shard allocation information", - "description": "Get a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-allocation-1", "parameters": [ { @@ -1205,7 +1205,7 @@ "cat" ], "summary": "Get component templates", - "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-component-templates", "parameters": [ { @@ -1241,7 +1241,7 @@ "cat" ], "summary": "Get component templates", - "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the get component template API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-component-templates-1", "parameters": [ { @@ -1280,7 +1280,7 @@ "cat" ], "summary": "Get a document count", - "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.\n ##Required authorization\n* Index privileges: `read`", "operationId": "cat-count", "parameters": [ { @@ -1310,7 +1310,7 @@ "cat" ], "summary": "Get a document count", - "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.\n ##Required authorization\n* Index privileges: `read`", "operationId": "cat-count-1", "parameters": [ { @@ -1343,7 +1343,7 @@ "cat" ], "summary": "Get field data cache information", - "description": "Get the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", + "description": "Get the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-fielddata", "parameters": [ { @@ -1379,7 +1379,7 @@ "cat" ], "summary": "Get field data cache information", - "description": "Get the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the nodes stats API.", + "description": "Get the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-fielddata-1", "parameters": [ { @@ -1418,7 +1418,7 @@ "cat" ], "summary": "Get the cluster health status", - "description": "IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", + "description": "IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-health", "parameters": [ { @@ -1521,7 +1521,7 @@ "cat" ], "summary": "Get index information", - "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-indices", "parameters": [ { @@ -1572,7 +1572,7 @@ "cat" ], "summary": "Get index information", - "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-indices-1", "parameters": [ { @@ -1626,7 +1626,7 @@ "cat" ], "summary": "Get master node information", - "description": "Get information about the master node, including the ID, bound IP address, and name.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get information about the master node, including the ID, bound IP address, and name.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-master", "parameters": [ { @@ -1706,7 +1706,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-data-frame-analytics", "parameters": [ { @@ -1745,7 +1745,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-data-frame-analytics-1", "parameters": [ { @@ -1787,7 +1787,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-datafeeds", "parameters": [ { @@ -1823,7 +1823,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-datafeeds-1", "parameters": [ { @@ -1862,7 +1862,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-jobs", "parameters": [ { @@ -1901,7 +1901,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-jobs-1", "parameters": [ { @@ -1943,7 +1943,7 @@ "cat" ], "summary": "Get trained models", - "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-trained-models", "parameters": [ { @@ -1988,7 +1988,7 @@ "cat" ], "summary": "Get trained models", - "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-trained-models-1", "parameters": [ { @@ -2036,7 +2036,7 @@ "cat" ], "summary": "Get node attribute information", - "description": "Get information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-nodeattrs", "parameters": [ { @@ -2122,7 +2122,7 @@ "cat" ], "summary": "Get node information", - "description": "Get information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-nodes", "parameters": [ { @@ -2245,7 +2245,7 @@ "cat" ], "summary": "Get pending task information", - "description": "Get information about cluster-level changes that have not yet taken effect.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", + "description": "Get information about cluster-level changes that have not yet taken effect.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the pending cluster tasks API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-pending-tasks", "parameters": [ { @@ -2335,7 +2335,7 @@ "cat" ], "summary": "Get plugin information", - "description": "Get a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-plugins", "parameters": [ { @@ -2425,7 +2425,7 @@ "cat" ], "summary": "Get shard recovery information", - "description": "Get information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "description": "Get information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-recovery", "parameters": [ { @@ -2470,7 +2470,7 @@ "cat" ], "summary": "Get shard recovery information", - "description": "Get information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "description": "Get information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. 
When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-recovery-1", "parameters": [ { @@ -2518,7 +2518,7 @@ "cat" ], "summary": "Get snapshot repository information", - "description": "Get a list of snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", + "description": "Get a list of snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "cat-repositories", "parameters": [ { @@ -2598,7 +2598,7 @@ "cat" ], "summary": "Get segment information", - "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-segments", "parameters": [ { @@ -2637,7 +2637,7 @@ "cat" ], "summary": "Get segment information", - "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-segments-1", "parameters": [ { @@ -2679,7 +2679,7 @@ "cat" ], "summary": "Get shard information", - "description": "Get information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. 
They are not intended for use by applications.", + "description": "Get information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-shards", "parameters": [ { @@ -2718,7 +2718,7 @@ "cat" ], "summary": "Get shard information", - "description": "Get information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-shards-1", "parameters": [ { @@ -2760,7 +2760,7 @@ "cat" ], "summary": "Get snapshot information", - "description": "Get information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", + "description": "Get information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "cat-snapshots", "parameters": [ { @@ -2799,7 +2799,7 @@ "cat" ], "summary": "Get snapshot information", - "description": "Get information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", + "description": "Get information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "cat-snapshots-1", "parameters": [ { @@ -2841,7 +2841,7 @@ "cat" ], "summary": "Get task information", - "description": "Get information about tasks currently running in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the task management API.", + "description": "Get information about tasks currently running in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-tasks", "parameters": [ { @@ -2977,7 +2977,7 @@ "cat" ], "summary": "Get index template information", - "description": "Get information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", + "description": "Get information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-templates", "parameters": [ { @@ -3013,7 +3013,7 @@ "cat" ], "summary": "Get index template information", - "description": "Get information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", + "description": "Get information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-templates-1", "parameters": [ { @@ -3052,7 +3052,7 @@ "cat" ], "summary": "Get thread pool statistics", - "description": "Get thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-thread-pool", "parameters": [ { @@ -3091,7 +3091,7 @@ "cat" ], "summary": "Get thread pool statistics", - "description": "Get thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-thread-pool-1", "parameters": [ { @@ -3133,7 +3133,7 @@ "cat" ], "summary": "Get transform information", - "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "cat-transforms", "parameters": [ { @@ -3175,7 +3175,7 @@ "cat" ], "summary": "Get transform information", - "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get transform statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "cat-transforms-1", "parameters": [ { @@ -3220,7 +3220,7 @@ "ccr" ], "summary": "Get auto-follow patterns", - "description": "Get cross-cluster replication auto-follow patterns.", + "description": "Get cross-cluster replication auto-follow patterns.\n ##Required authorization\n* Cluster privileges: `manage_ccr`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns" }, @@ -3386,7 +3386,7 @@ "ccr" ], "summary": "Delete auto-follow patterns", - "description": "Delete a collection of cross-cluster replication auto-follow patterns.", + "description": "Delete a collection of cross-cluster replication auto-follow patterns.\n ##Required authorization\n* Cluster privileges: `manage_ccr`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns" }, @@ -3602,7 +3602,7 @@ "ccr" ], "summary": "Get follower information", - "description": "Get information about all cross-cluster replication follower indices.\nFor example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.", + "description": "Get information about all cross-cluster replication follower indices.\nFor example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.\n ##Required authorization\n* Cluster privileges: `monitor`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication" }, @@ -3680,7 +3680,7 @@ "ccr" ], "summary": "Get follower stats", - "description": "Get cross-cluster replication follower stats.\nThe API returns shard-level stats about the \"following tasks\" associated with each shard for the specified indices.", + "description": "Get cross-cluster replication follower stats.\nThe API returns shard-level stats about the \"following tasks\" associated with each shard for the specified indices.\n ##Required authorization\n* Cluster privileges: `monitor`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication" }, @@ -3852,7 +3852,7 @@ "ccr" ], "summary": "Get auto-follow patterns", - "description": "Get cross-cluster replication auto-follow patterns.", + "description": "Get cross-cluster replication auto-follow patterns.\n ##Required authorization\n* Cluster privileges: `manage_ccr`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns" }, @@ -3882,7 +3882,7 @@ "ccr" ], "summary": "Pause an auto-follow pattern", - "description": "Pause a cross-cluster replication auto-follow pattern.\nWhen the API returns, the auto-follow pattern is inactive.\nNew indices that are created on the remote cluster and match the auto-follow patterns are ignored.\n\nYou can resume auto-following with the resume auto-follow pattern API.\nWhen it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns.\nRemote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.", + "description": "Pause a cross-cluster replication auto-follow 
pattern.\nWhen the API returns, the auto-follow pattern is inactive.\nNew indices that are created on the remote cluster and match the auto-follow patterns are ignored.\n\nYou can resume auto-following with the resume auto-follow pattern API.\nWhen it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns.\nRemote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.\n ##Required authorization\n* Cluster privileges: `manage_ccr`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns" }, @@ -3943,7 +3943,7 @@ "ccr" ], "summary": "Pause a follower", - "description": "Pause a cross-cluster replication follower index.\nThe follower index will not fetch any additional operations from the leader index.\nYou can resume following with the resume follower API.\nYou can pause and resume a follower index to change the configuration of the following task.", + "description": "Pause a cross-cluster replication follower index.\nThe follower index will not fetch any additional operations from the leader index.\nYou can resume following with the resume follower API.\nYou can pause and resume a follower index to change the configuration of the following task.\n ##Required authorization\n* Cluster privileges: `manage_ccr`", "operationId": "ccr-pause-follow", "parameters": [ { @@ -4001,7 +4001,7 @@ "ccr" ], "summary": "Resume an auto-follow pattern", - "description": "Resume a cross-cluster replication auto-follow pattern that was paused.\nThe auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.\nRemote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.", + "description": "Resume a cross-cluster replication auto-follow pattern that was paused.\nThe auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.\nRemote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.\n ##Required authorization\n* Cluster privileges: `manage_ccr`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication/manage-auto-follow-patterns" }, @@ -4170,7 +4170,7 @@ "ccr" ], "summary": "Get cross-cluster replication stats", - "description": "This API returns stats about auto-following and the same shard-level stats as the get follower stats API.", + "description": "This API returns stats about auto-following and the same shard-level stats as the get follower stats API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "ccr-stats", "parameters": [ { @@ -4239,7 +4239,7 @@ "ccr" ], "summary": "Unfollow an index", - "description": "Convert a cross-cluster replication follower index to a regular index.\nThe API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.\nThe follower index must be paused and closed before you call the unfollow API.\n\n> info\n> Currently cross-cluster replication does not support converting an existing regular index to a follower index. 
Converting a follower index to a regular index is an irreversible operation.", + "description": "Convert a cross-cluster replication follower index to a regular index.\nThe API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication.\nThe follower index must be paused and closed before you call the unfollow API.\n\n> info\n> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.\n ##Required authorization\n* Index privileges: `manage_follow_index`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/cross-cluster-replication" }, @@ -4300,7 +4300,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. 
Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -4337,7 +4337,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -4402,7 +4402,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -4442,7 +4442,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. 
Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -4657,7 +4657,7 @@ "indices" ], "summary": "Get component templates", - "description": "Get information about component templates.", + "description": "Get information about component templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-get-component-template-1", "parameters": [ { @@ -4688,7 +4688,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-put-component-template", "parameters": [ { @@ -4722,7 +4722,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of 
multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-put-component-template-1", "parameters": [ { @@ -4756,7 +4756,7 @@ "indices" ], "summary": "Delete component templates", - "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-delete-component-template", "parameters": [ { @@ -4968,7 +4968,7 @@ "indices" ], "summary": "Get component templates", - "description": "Get information about component templates.", + "description": "Get information about component templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-get-component-template", "parameters": [ { @@ -4998,7 +4998,7 @@ "cluster" ], "summary": "Get cluster-wide settings", - "description": "By default, it returns only settings that have been explicitly defined.", + "description": "By default, it returns only settings that have been explicitly defined.\n ##Required authorization\n* Cluster privileges: 
`monitor`", "operationId": "cluster-get-settings", "parameters": [ { @@ -5204,7 +5204,7 @@ "cluster" ], "summary": "Get the cluster health status", - "description": "You can also use the API to get the health status of only specified data streams and indices.\nFor data streams, the API retrieves the health status of the stream’s backing indices.\n\nThe cluster health status is: green, yellow or red.\nOn the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.\nThe index level status is controlled by the worst shard status.\n\nOne of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.\nThe cluster status is controlled by the worst index status.", + "description": "You can also use the API to get the health status of only specified data streams and indices.\nFor data streams, the API retrieves the health status of the stream’s backing indices.\n\nThe cluster health status is: green, yellow or red.\nOn the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.\nThe index level status is controlled by the worst shard status.\n\nOne of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.\nThe cluster status is controlled by the worst index status.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "cluster-health", "parameters": [ { @@ -5261,7 +5261,7 @@ "cluster" ], "summary": "Get the cluster health status", - "description": "You can also use the API to get the health status of only specified data streams and indices.\nFor data streams, the API retrieves the health status of the stream’s backing indices.\n\nThe cluster health status is: green, yellow or red.\nOn the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.\nThe index level status is controlled by the worst shard status.\n\nOne of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.\nThe cluster status is controlled by the worst index status.", + "description": "You can also use the API to get the health status of only specified data streams and indices.\nFor data streams, the API retrieves the health status of the stream’s backing indices.\n\nThe cluster health status is: green, yellow or red.\nOn the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. 
Green means that all shards are allocated.\nThe index level status is controlled by the worst shard status.\n\nOne of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.\nThe cluster status is controlled by the worst index status.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "cluster-health-1", "parameters": [ { @@ -5380,7 +5380,7 @@ "cluster" ], "summary": "Get the pending cluster tasks", - "description": "Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.\n\nNOTE: This API returns a list of any pending updates to the cluster state.\nThese are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.\nHowever, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.", + "description": "Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.\n\nNOTE: This API returns a list of any pending updates to the cluster state.\nThese are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.\nHowever, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cluster-pending-tasks", "parameters": [ { @@ -5436,7 +5436,7 @@ "cluster" ], "summary": "Get remote cluster information", - "description": "Get information about configured remote clusters.\nThe API returns connection and endpoint information keyed by the configured remote cluster alias.\n\n> info\n> This API returns information that reflects current state on the local cluster.\n> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.\n> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.\n> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster).", + "description": "Get information about configured remote clusters.\nThe API returns connection and endpoint information keyed by the configured remote cluster alias.\n\n> info\n> This API returns information that reflects current state on the local cluster.\n> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.\n> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster.\n> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster).\n ##Required authorization\n* Cluster privileges: `monitor`", "externalDocs": { "url": 
"https://www.elastic.co/docs/solutions/search/cross-cluster-search" }, @@ -5598,7 +5598,7 @@ "cluster" ], "summary": "Get the cluster state", - "description": "Get comprehensive information about the state of the cluster.\n\nThe cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.\n\nThe elected master node ensures that every node in the cluster has a copy of the same cluster state.\nThis API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.\nYou may need to consult the Elasticsearch source code to determine the precise meaning of the response.\n\nBy default the API will route requests to the elected master node since this node is the authoritative source of cluster states.\nYou can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.\n\nElasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.\nIf you use this API repeatedly, your cluster may become unstable.\n\nWARNING: The response is a representation of an internal data structure.\nIts format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.\nDo not query this API using external monitoring tools.\nInstead, obtain the information you require using other more stable cluster APIs.", + "description": "Get comprehensive information about the state of the cluster.\n\nThe cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.\n\nThe elected master node ensures that every node in the cluster has a copy of the same cluster state.\nThis API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.\nYou may need to consult the Elasticsearch source code to determine the precise meaning of the response.\n\nBy default the API will route requests to the elected master node since this node is the authoritative source of cluster states.\nYou can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.\n\nElasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.\nIf you use this API repeatedly, your cluster may become unstable.\n\nWARNING: The response is a representation of an internal data structure.\nIts format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.\nDo not query this API using external monitoring tools.\nInstead, obtain the information you require using other more stable cluster APIs.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "cluster-state", "parameters": [ { @@ -5640,7 +5640,7 @@ "cluster" ], "summary": "Get the cluster state", - "description": "Get comprehensive information about the 
state of the cluster.\n\nThe cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.\n\nThe elected master node ensures that every node in the cluster has a copy of the same cluster state.\nThis API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.\nYou may need to consult the Elasticsearch source code to determine the precise meaning of the response.\n\nBy default the API will route requests to the elected master node since this node is the authoritative source of cluster states.\nYou can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.\n\nElasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.\nIf you use this API repeatedly, your cluster may become unstable.\n\nWARNING: The response is a representation of an internal data structure.\nIts format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.\nDo not query this API using external monitoring tools.\nInstead, obtain the information you require using other more stable cluster APIs.", + "description": "Get comprehensive information about the state of the cluster.\n\nThe cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.\n\nThe elected master node ensures that every node in the cluster has a copy of the same cluster state.\nThis API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.\nYou may need to consult the Elasticsearch source code to determine the precise meaning of the response.\n\nBy default the API will route requests to the elected master node since this node is the authoritative source of cluster states.\nYou can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.\n\nElasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.\nIf you use this API repeatedly, your cluster may become unstable.\n\nWARNING: The response is a representation of an internal data structure.\nIts format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.\nDo not query this API using external monitoring tools.\nInstead, obtain the information you require using other more stable cluster APIs.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "cluster-state-1", "parameters": [ { @@ -5685,7 +5685,7 @@ "cluster" ], "summary": "Get the cluster state", - "description": "Get comprehensive information about the state of the cluster.\n\nThe cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in 
the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.\n\nThe elected master node ensures that every node in the cluster has a copy of the same cluster state.\nThis API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.\nYou may need to consult the Elasticsearch source code to determine the precise meaning of the response.\n\nBy default the API will route requests to the elected master node since this node is the authoritative source of cluster states.\nYou can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.\n\nElasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.\nIf you use this API repeatedly, your cluster may become unstable.\n\nWARNING: The response is a representation of an internal data structure.\nIts format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.\nDo not query this API using external monitoring tools.\nInstead, obtain the information you require using other more stable cluster APIs.", + "description": "Get comprehensive information about the state of the cluster.\n\nThe cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.\n\nThe elected master node ensures that every node in the cluster has a copy of the same cluster state.\nThis API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.\nYou may need to consult the Elasticsearch source code to determine the precise meaning of the response.\n\nBy default the API will route requests to the elected master node since this node is the authoritative source of cluster states.\nYou can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter.\n\nElasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.\nIf you use this API repeatedly, your cluster may become unstable.\n\nWARNING: The response is a representation of an internal data structure.\nIts format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.\nDo not query this API using external monitoring tools.\nInstead, obtain the information you require using other more stable cluster APIs.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "cluster-state-2", "parameters": [ { @@ -5733,7 +5733,7 @@ "cluster" ], "summary": "Get cluster statistics", - "description": "Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).", + "description": "Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).\n ##Required 
authorization\n* Cluster privileges: `monitor`", "operationId": "cluster-stats", "parameters": [ { @@ -5757,7 +5757,7 @@ "cluster" ], "summary": "Get cluster statistics", - "description": "Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).", + "description": "Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cluster-stats-1", "parameters": [ { @@ -7736,7 +7736,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count-1", "parameters": [ { @@ -7803,7 +7803,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count", "parameters": [ { @@ -7872,7 +7872,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count-3", "parameters": [ { @@ -7942,7 +7942,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count-2", "parameters": [ { @@ -8014,7 +8014,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n ##Required authorization\n* Index privileges: `create`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -8087,7 +8087,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n ##Required authorization\n* Index privileges: `create`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -8162,7 +8162,7 @@ "indices" ], "summary": "Import a dangling index", - "description": "If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", + "description": "If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "dangling-indices-import-dangling-index", "parameters": [ { @@ -8239,7 +8239,7 @@ "indices" ], "summary": "Delete a dangling index", - "description": "If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", + "description": "If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "dangling-indices-delete-dangling-index", "parameters": [ { @@ -8306,7 +8306,7 @@ "indices" ], "summary": "Get the dangling indices", - "description": "If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.\n\nUse this API to list dangling indices, which you can then import or delete.", + "description": "If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.\n\nUse this 
API to list dangling indices, which you can then import or delete.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "dangling-indices-list-dangling-indices", "responses": { "200": { @@ -8345,7 +8345,7 @@ "document" ], "summary": "Get a document by its ID", - "description": "Get a document and its source or stored fields from an index.\n\nBy default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).\nIn the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.\nTo turn off realtime behavior, set the `realtime` parameter to false.\n\n**Source filtering**\n\nBy default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.\nYou can turn off `_source` retrieval by using the `_source` parameter:\n\n```\nGET my-index-000001/_doc/0?_source=false\n```\n\nIf you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.\nThis can be helpful with large documents where partial retrieval can save on network overhead\nBoth parameters take a comma separated list of fields or wildcard expressions.\nFor example:\n\n```\nGET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities\n```\n\nIf you only want to specify includes, you can use a shorter notation:\n\n```\nGET my-index-000001/_doc/0?_source=*.id\n```\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to retrieve a document.\nFor example:\n\n```\nGET my-index-000001/_doc/2?routing=user1\n```\n\nThis request gets the document with ID 2, but it is routed based on the user.\nThe document is not fetched if the correct routing is not specified.\n\n**Distributed**\n\nThe GET operation is hashed into a specific shard ID.\nIt is then redirected to one of the replicas within that shard ID and returns the result.\nThe replicas are the primary shard and its replicas within that shard ID group.\nThis means that the more replicas you have, the better your GET scaling will be.\n\n**Versioning support**\n\nYou can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.\n\nInternally, Elasticsearch has marked the old document as deleted and added an entirely new document.\nThe old version of the document doesn't disappear immediately, although you won't be able to access it.\nElasticsearch cleans up deleted documents in the background as you continue to index more data.", + "description": "Get a document and its source or stored fields from an index.\n\nBy default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).\nIn the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.\nTo turn off realtime behavior, set the `realtime` parameter to false.\n\n**Source filtering**\n\nBy default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.\nYou can turn off `_source` retrieval by using the `_source` 
parameter:\n\n```\nGET my-index-000001/_doc/0?_source=false\n```\n\nIf you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.\nThis can be helpful with large documents where partial retrieval can save on network overhead\nBoth parameters take a comma separated list of fields or wildcard expressions.\nFor example:\n\n```\nGET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities\n```\n\nIf you only want to specify includes, you can use a shorter notation:\n\n```\nGET my-index-000001/_doc/0?_source=*.id\n```\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to retrieve a document.\nFor example:\n\n```\nGET my-index-000001/_doc/2?routing=user1\n```\n\nThis request gets the document with ID 2, but it is routed based on the user.\nThe document is not fetched if the correct routing is not specified.\n\n**Distributed**\n\nThe GET operation is hashed into a specific shard ID.\nIt is then redirected to one of the replicas within that shard ID and returns the result.\nThe replicas are the primary shard and its replicas within that shard ID group.\nThis means that the more replicas you have, the better your GET scaling will be.\n\n**Versioning support**\n\nYou can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.\n\nInternally, Elasticsearch has marked the old document as deleted and added an entirely new document.\nThe old version of the document doesn't disappear immediately, although you won't be able to access it.\nElasticsearch cleans up deleted documents in the background as you continue to index more data.\n ##Required authorization\n* Index privileges: `read`", "operationId": "get", "parameters": [ { @@ -8513,7 +8513,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. 
Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this 
behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version 
number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -8583,7 +8583,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -8653,7 +8653,7 @@ "document" ], "summary": "Delete a document", - "description": "Remove a JSON document from the specified index.\n\nNOTE: You cannot send deletion requests directly to a data stream.\nTo delete a document in a data stream, you must target the backing index containing the document.\n\n**Optimistic concurrency control**\n\nDelete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Versioning**\n\nEach document indexed is versioned.\nWhen deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime.\nEvery write operation run on a document, deletes included, causes its version to be incremented.\nThe version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations.\nThe length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting.\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to delete a document.\n\nIf the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request.\n\nFor example:\n\n```\nDELETE /my-index-000001/_doc/1?routing=shard-1\n```\n\nThis request deletes the document with ID 1, but it is routed based on the user.\nThe document is not deleted if the correct routing is not specified.\n\n**Distributed**\n\nThe delete operation gets hashed into a specific shard ID.\nIt then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.", + "description": "Remove a JSON document from the specified index.\n\nNOTE: You cannot send deletion requests directly to a data stream.\nTo delete a document in a data stream, you must target the backing index containing the document.\n\n**Optimistic concurrency control**\n\nDelete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will 
result in a `VersionConflictException` and a status code of `409`.\n\n**Versioning**\n\nEach document indexed is versioned.\nWhen deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime.\nEvery write operation run on a document, deletes included, causes its version to be incremented.\nThe version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations.\nThe length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting.\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to delete a document.\n\nIf the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request.\n\nFor example:\n\n```\nDELETE /my-index-000001/_doc/1?routing=shard-1\n```\n\nThis request deletes the document with ID 1, but it is routed based on the user.\nThe document is not deleted if the correct routing is not specified.\n\n**Distributed**\n\nThe delete operation gets hashed into a specific shard ID.\nIt then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.\n ##Required authorization\n* Index privileges: `delete`", "operationId": "delete", "parameters": [ { @@ -8933,7 +8933,7 @@ "document" ], "summary": "Delete documents", - "description": "Deletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick, they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal 
number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.", + "description": "Deletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick, they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will 
choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.\n ##Required authorization\n* Index privileges: `read`,`delete`", "operationId": "delete-by-query", "parameters": [ { @@ -9428,7 +9428,7 @@ "script" ], "summary": "Get a script or search template", - "description": "Retrieves a stored script or search template.", + "description": "Retrieves a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "get-script", "parameters": [ { @@ -9487,7 +9487,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -9527,7 +9527,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -9567,7 +9567,7 @@ "script" ], "summary": "Delete a script or search template", - "description": "Deletes a stored script or search template.", + "description": "Deletes a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "delete-script", "parameters": [ { @@ -10176,7 +10176,7 @@ "esql" ], "summary": "Run an async ES|QL query", - "description": "Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.\n\nThe API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.", + "description": "Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.\n\nThe API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql" }, @@ -10497,7 +10497,7 @@ "esql" ], "summary": "Get a specific running ES|QL query information", - "description": "Returns an object extended information about a running ES|QL query.", + "description": "Returns an object extended information about a running ES|QL query.\n ##Required authorization\n* Cluster privileges: `monitor_esql`", "operationId": "esql-get-query", "parameters": [ { @@ -10568,7 +10568,7 @@ "esql" ], "summary": "Get running ES|QL queries information", - "description": "Returns an object containing IDs and other information about the running ES|QL queries.", + "description": "Returns an object containing IDs and other information about the running ES|QL queries.\n ##Required authorization\n* Cluster privileges: `monitor_esql`", "operationId": "esql-list-queries", "responses": { "200": { @@ -10736,7 +10736,7 @@ "document" ], 
"summary": "Get a document's source", - "description": "Get the source of a document.\nFor example:\n\n```\nGET my-index-000001/_source/1\n```\n\nYou can use the source filtering parameters to control which parts of the `_source` are returned:\n\n```\nGET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities\n```", + "description": "Get the source of a document.\nFor example:\n\n```\nGET my-index-000001/_source/1\n```\n\nYou can use the source filtering parameters to control which parts of the `_source` are returned:\n\n```\nGET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-source-field" }, @@ -10884,7 +10884,7 @@ "document" ], "summary": "Check for a document source", - "description": "Check whether a document source exists in an index.\nFor example:\n\n```\nHEAD my-index-000001/_source/1\n```\n\nA document's source is not available if it is disabled in the mapping.", + "description": "Check whether a document source exists in an index.\nFor example:\n\n```\nHEAD my-index-000001/_source/1\n```\n\nA document's source is not available if it is disabled in the mapping.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-source-field" }, @@ -11020,7 +11020,7 @@ "search" ], "summary": "Explain a document match result", - "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", + "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.\n ##Required authorization\n* Index privileges: `read`", "operationId": "explain", "parameters": [ { @@ -11087,7 +11087,7 @@ "search" ], "summary": "Explain a document match result", - "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", + "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.\n ##Required authorization\n* Index privileges: `read`", "operationId": "explain-1", "parameters": [ { @@ -11263,7 +11263,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps", "parameters": [ { @@ -11312,7 +11312,7 @@ "search" ], "summary": "Get the field capabilities", - "description": 
"Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps-1", "parameters": [ { @@ -11363,7 +11363,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps-2", "parameters": [ { @@ -11415,7 +11415,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps-3", "parameters": [ { @@ -11570,7 +11570,7 @@ "fleet" ], "summary": "Run multiple Fleet searches", - "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.", + "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.\n ##Required authorization\n* Index privileges: `read`", "operationId": "fleet-msearch", "parameters": [ { @@ -11628,7 +11628,7 @@ "fleet" ], "summary": "Run multiple Fleet searches", - "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search 
API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.", + "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.\n ##Required authorization\n* Index privileges: `read`", "operationId": "fleet-msearch-1", "parameters": [ { @@ -11688,7 +11688,7 @@ "fleet" ], "summary": "Run multiple Fleet searches", - "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.", + "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.\n ##Required authorization\n* Index privileges: `read`", "operationId": "fleet-msearch-2", "parameters": [ { @@ -11749,7 +11749,7 @@ "fleet" ], "summary": "Run multiple Fleet searches", - "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.", + "description": "Run several Fleet searches with a single API request.\nThe API follows the same structure as the multi search API.\nHowever, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.\n ##Required authorization\n* Index privileges: `read`", "operationId": "fleet-msearch-3", "parameters": [ { @@ -11812,7 +11812,7 @@ "fleet" ], "summary": "Run a Fleet search", - "description": "The purpose of the Fleet search API is to provide an API where the search will be run only\nafter the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.", + "description": "The purpose of the Fleet search API is to provide an API where the search will be run only\nafter the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.\n ##Required authorization\n* Index privileges: `read`", "operationId": "fleet-search", "parameters": [ { @@ -11963,7 +11963,7 @@ "fleet" ], "summary": "Run a Fleet search", - "description": "The purpose of the Fleet search API is to provide an API where the search will be run only\nafter the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.", + "description": "The purpose of the Fleet search API is to provide an API where the search will be run only\nafter the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.\n ##Required authorization\n* Index privileges: `read`", "operationId": "fleet-search-1", "parameters": [ { @@ -12116,7 +12116,7 @@ "script" ], "summary": "Get script contexts", - "description": "Get a list of supported script contexts and their methods.", + "description": "Get a list of supported script contexts and their methods.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "get-script-context", "responses": { "200": { @@ -12150,7 +12150,7 @@ "script" ], "summary": "Get script languages", - "description": "Get a list of available script types, languages, and contexts.", + "description": "Get a list of available script types, languages, and contexts.\n ##Required authorization\n* Cluster privileges: 
`manage`", "operationId": "get-script-languages", "responses": { "200": { @@ -12324,6 +12324,7 @@ "ilm" ], "summary": "Get lifecycle policies", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ilm`,`read_ilm`", "operationId": "ilm-get-lifecycle", "parameters": [ { @@ -12348,7 +12349,7 @@ "ilm" ], "summary": "Create or update a lifecycle policy", - "description": "If the specified policy exists, it is replaced and the policy version is incremented.\n\nNOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.", + "description": "If the specified policy exists, it is replaced and the policy version is incremented.\n\nNOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage_ilm`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/lifecycle/index-lifecycle-management/index-lifecycle" }, @@ -12437,7 +12438,7 @@ "ilm" ], "summary": "Delete a lifecycle policy", - "description": "You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.", + "description": "You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.\n ##Required authorization\n* Cluster privileges: `manage_ilm`", "operationId": "ilm-delete-lifecycle", "parameters": [ { @@ -12499,7 +12500,7 @@ "ilm" ], "summary": "Explain the lifecycle state", - "description": "Get the current lifecycle status for one or more indices.\nFor data streams, the API retrieves the current lifecycle status for the stream's backing indices.\n\nThe response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.", + "description": "Get the current lifecycle status for one or more indices.\nFor data streams, the API retrieves the current lifecycle status for the stream's backing indices.\n\nThe response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`manage_ilm`", "operationId": "ilm-explain-lifecycle", "parameters": [ { @@ -12582,6 +12583,7 @@ "ilm" ], "summary": "Get lifecycle policies", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ilm`,`read_ilm`", "operationId": "ilm-get-lifecycle-1", "parameters": [ { @@ -12605,7 +12607,7 @@ "ilm" ], "summary": "Get the ILM status", - "description": "Get the current index lifecycle management status.", + "description": "Get the current index lifecycle management status.\n ##Required authorization\n* Cluster privileges: `read_ilm`", "operationId": "ilm-get-status", "responses": { "200": { @@ -12774,7 +12776,7 @@ "ilm" ], "summary": "Move to a lifecycle step", - "description": "Manually move an index into a specific step in the lifecycle policy and run that step.\n\nWARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. 
This is a potentially destructive action and this should be considered an expert level API.\n\nYou must specify both the current step and the step to be executed in the body of the request.\nThe request will fail if the current step does not match the step currently running for the index\nThis is to prevent the index from being moved from an unexpected step into the next step.\n\nWhen specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.\nIf only the phase is specified, the index will move to the first step of the first action in the target phase.\nIf the phase and action are specified, the index will move to the first step of the specified action in the specified phase.\nOnly actions specified in the ILM policy are considered valid.\nAn index cannot move to a step that is not part of its policy.", + "description": "Manually move an index into a specific step in the lifecycle policy and run that step.\n\nWARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API.\n\nYou must specify both the current step and the step to be executed in the body of the request.\nThe request will fail if the current step does not match the step currently running for the index\nThis is to prevent the index from being moved from an unexpected step into the next step.\n\nWhen specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.\nIf only the phase is specified, the index will move to the first step of the first action in the target phase.\nIf the phase and action are specified, the index will move to the first step of the specified action in the specified phase.\nOnly actions specified in the ILM policy are considered valid.\nAn index cannot move to a step that is not part of its policy.\n ##Required authorization\n* Index privileges: `manage_ilm`", "operationId": "ilm-move-to-step", "parameters": [ { @@ -12855,7 +12857,7 @@ "ilm" ], "summary": "Remove policies from an index", - "description": "Remove the assigned lifecycle policies from an index or a data stream's backing indices.\nIt also stops managing the indices.", + "description": "Remove the assigned lifecycle policies from an index or a data stream's backing indices.\nIt also stops managing the indices.\n ##Required authorization\n* Index privileges: `manage_ilm`", "operationId": "ilm-remove-policy", "parameters": [ { @@ -12912,7 +12914,7 @@ "ilm" ], "summary": "Retry a policy", - "description": "Retry running the lifecycle policy for an index that is in the ERROR step.\nThe API sets the policy back to the step where the error occurred and runs the step.\nUse the explain lifecycle state API to determine whether an index is in the ERROR step.", + "description": "Retry running the lifecycle policy for an index that is in the ERROR step.\nThe API sets the policy back to the step where the error occurred and runs the step.\nUse the explain lifecycle state API to determine whether an index is in the ERROR step.\n ##Required authorization\n* Index privileges: `manage_ilm`", "operationId": "ilm-retry", "parameters": [ { @@ -12948,7 +12950,7 @@ "ilm" ], "summary": "Start the ILM plugin", - "description": "Start the index lifecycle management plugin if it is currently stopped.\nILM is started automatically when the 
cluster is formed.\nRestarting ILM is necessary only when it has been stopped using the stop ILM API.", + "description": "Start the index lifecycle management plugin if it is currently stopped.\nILM is started automatically when the cluster is formed.\nRestarting ILM is necessary only when it has been stopped using the stop ILM API.\n ##Required authorization\n* Cluster privileges: `manage_ilm`", "operationId": "ilm-start", "parameters": [ { @@ -12999,7 +13001,7 @@ "ilm" ], "summary": "Stop the ILM plugin", - "description": "Halt all lifecycle management operations and stop the index lifecycle management plugin.\nThis is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.\n\nThe API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.\nUse the get ILM status API to check whether ILM is running.", + "description": "Halt all lifecycle management operations and stop the index lifecycle management plugin.\nThis is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.\n\nThe API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.\nUse the get ILM status API to check whether ILM is running.\n ##Required authorization\n* Cluster privileges: `manage_ilm`", "operationId": "ilm-stop", "parameters": [ { @@ -13050,7 +13052,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -13247,7 +13249,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -13278,7 +13280,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe
`index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -13345,7 +13347,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -13417,7 +13419,7 @@ "indices" ], "summary": "Clear the cache", - "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.", + "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-clear-cache", "parameters": [ { @@ -13459,7 +13461,7 @@ "indices" ], "summary": "Clear the cache", - "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.", + "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, 
or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-clear-cache-1", "parameters": [ { @@ -13504,7 +13506,7 @@ "indices" ], "summary": "Clone an index", - "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.", + "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-clone", "parameters": [ { @@ -13544,7 +13546,7 @@ "indices" ], "summary": "Clone an index", - "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.", + "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-clone-1", "parameters": [ { @@ -13586,7 +13588,7 @@ "indices" ], "summary": "Close an index", - "description": "A closed index is blocked for read or write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nClosed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behaviour can be turned off using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. 
This setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.", + "description": "A closed index is blocked for read or write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nClosed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behaviour can be turned off using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-close", "parameters": [ { @@ -13707,7 +13709,7 @@ "indices" ], "summary": "Get index information", - "description": "Get information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", + "description": "Get information about one or more indices. 
For data streams, the API returns information about the\nstream’s backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`manage`", "operationId": "indices-get", "parameters": [ { @@ -13824,7 +13826,7 @@ "indices" ], "summary": "Create an index", - "description": "You can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", + "description": "You can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on 
all subsequent write operations.\n ##Required authorization\n* Index privileges: `create_index`,`manage`", "operationId": "indices-create", "parameters": [ { @@ -13951,7 +13953,7 @@ "indices" ], "summary": "Delete indices", - "description": "Deleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.", + "description": "Deleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.\n ##Required authorization\n* Index privileges: `delete_index`", "operationId": "indices-delete", "parameters": [ { @@ -14127,7 +14129,7 @@ "data stream" ], "summary": "Get data streams", - "description": "Get information about one or more data streams.", + "description": "Get information about one or more data streams.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-data-stream-1", "parameters": [ { @@ -14158,7 +14160,7 @@ "data stream" ], "summary": "Create a data stream", - "description": "You must have a matching index template with data stream enabled.", + "description": "You must have a matching index template with data stream enabled.\n ##Required authorization\n* Index privileges: `create_index`", "operationId": "indices-create-data-stream", "parameters": [ { @@ -14212,7 +14214,7 @@ "data stream" ], "summary": "Delete data streams", - "description": "Deletes one or more data streams and their backing indices.", + "description": "Deletes one or more data streams and their backing indices.\n ##Required authorization\n* Index privileges: `delete_index`", "operationId": "indices-delete-data-stream", "parameters": [ { @@ -14320,7 +14322,7 @@ "data stream" ], "summary": "Get data stream stats", - "description": "Get statistics for one or more data streams.", + "description": "Get statistics for one or more data streams.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-data-streams-stats", "parameters": [ { @@ -14341,7 +14343,7 @@ "data stream" ], "summary": "Get data stream stats", - "description": "Get statistics for one or more data streams.", + "description": "Get statistics for one or more data streams.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-data-streams-stats-1", "parameters": [ { @@ -14365,7 +14367,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias-2", "parameters": [ { @@ -14473,7 +14475,7 @@ "indices" ], "summary": "Delete an alias", - "description": "Removes a data stream or index from an alias.", + "description": "Removes a data stream or index from an alias.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-delete-alias", 
"parameters": [ { @@ -14611,7 +14613,7 @@ "indices" ], "summary": "Delete an alias", - "description": "Removes a data stream or index from an alias.", + "description": "Removes a data stream or index from an alias.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-delete-alias-1", "parameters": [ { @@ -15123,7 +15125,7 @@ "indices" ], "summary": "Get index templates", - "description": "Get information about one or more index templates.", + "description": "Get information about one or more index templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-get-index-template-1", "parameters": [ { @@ -15154,7 +15156,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an 
index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-put-index-template", "parameters": [ { @@ -15191,7 +15193,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the 
index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-put-index-template-1", "parameters": [ { @@ -15228,7 +15230,7 @@ "indices" ], "summary": "Delete an index template", - "description": "The provided may contain multiple template names separated by a comma. If multiple template\nnames are specified then there is no wildcard support and the provided names should match completely with\nexisting templates.", + "description": "The provided may contain multiple template names separated by a comma. 
If multiple template\nnames are specified then there is no wildcard support and the provided names should match completely with\nexisting templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-delete-index-template", "parameters": [ { @@ -15344,7 +15346,7 @@ "indices" ], "summary": "Get legacy index templates", - "description": "Get information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "description": "Get information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/templates" }, @@ -15376,7 +15378,7 @@ "indices" ], "summary": "Create or update a legacy index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly 
bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`,`manage`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/templates" }, @@ -15414,7 +15416,7 @@ "indices" ], "summary": "Create or update a legacy index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates 
with the same order value will result in a non-deterministic merging order.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`,`manage`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/templates" }, @@ -15452,7 +15454,7 @@ "indices" ], "summary": "Delete a legacy index template", - "description": "IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "description": "IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-delete-template", "parameters": [ { @@ -15507,7 +15509,7 @@ "indices" ], "summary": "Check existence of index templates", - "description": "Get information about whether index templates exist.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "description": "Get information about whether index templates exist.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/templates" }, @@ -15726,7 +15728,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias-1", "parameters": [ { @@ -15875,7 +15877,7 @@ "indices" ], "summary": "Get field usage stats", - "description": "Get field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.\n\nThe response body reports the per-shard usage count of the data structures that back the fields in the index.\nA given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.", + "description": "Get field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.\n\nThe response body reports the per-shard usage count of the data structures that back the fields in the index.\nA given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-field-usage-stats", "parameters": [ { @@ -15963,7 +15965,7 @@ "indices" ], "summary": "Flush data streams or indices", - "description": "Flushing a 
data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-flush-1", "parameters": [ { @@ -15994,7 +15996,7 @@ "indices" ], "summary": "Flush data streams or indices", - "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no 
longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-flush", "parameters": [ { @@ -16027,7 +16029,7 @@ "indices" ], "summary": "Flush data streams or indices", - "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off 
the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-flush-3", "parameters": [ { @@ -16061,7 +16063,7 @@ "indices" ], "summary": "Flush data streams or indices", - "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.", + "description": "Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index.\nWhen restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart.\nElasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.\n\nAfter each operation has been flushed it is permanently stored in the Lucene index.\nThis may mean that there is no need to maintain an additional copy of it in the transaction log.\nThe transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.\n\nIt is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.\nIf you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.\n ##Required authorization\n* Index privileges: 
`maintenance`", "operationId": "indices-flush-2", "parameters": [ { @@ -16097,7 +16099,7 @@ "indices" ], "summary": "Force a merge", - "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the force_merge threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receive no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST 
/.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```", + "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the force_merge threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receive no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```\n ##Required authorization\n* 
Index privileges: `maintenance`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/index-settings/merge" }, @@ -16139,7 +16141,7 @@ "indices" ], "summary": "Force a merge", - "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the force_merge threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receive no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor 
example:\n\n```\nPOST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```", + "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the force_merge threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receives no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```\n 
##Required authorization\n* Index privileges: `maintenance`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/index-settings/merge" }, @@ -16184,7 +16186,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias", "parameters": [ { @@ -16214,7 +16216,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias-3", "parameters": [ { @@ -16247,7 +16249,7 @@ "data stream" ], "summary": "Get data stream lifecycle stats", - "description": "Get statistics about the data streams that are managed by a data stream lifecycle.", + "description": "Get statistics about the data streams that are managed by a data stream lifecycle.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "indices-get-data-lifecycle-stats", "responses": { "200": { @@ -16305,7 +16307,7 @@ "data stream" ], "summary": "Get data streams", - "description": "Get information about one or more data streams.", + "description": "Get information about one or more data streams.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-data-stream", "parameters": [ { @@ -16335,7 +16337,7 @@ "data stream" ], "summary": "Get data stream settings", - "description": "Get setting information for one or more data streams.", + "description": "Get setting information for one or more data streams.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-data-stream-settings", "parameters": [ { @@ -16397,7 +16399,7 @@ "data stream" ], "summary": "Update data stream settings", - "description": "This API can be used to override settings on specific data streams. These overrides will take precedence over what\nis specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,\nonly certain settings are allowed. If possible, the setting change is applied to all\nbacking indices. Otherwise, it will be applied when the data stream is next rolled over.", + "description": "This API can be used to override settings on specific data streams. These overrides will take precedence over what\nis specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,\nonly certain settings are allowed. If possible, the setting change is applied to all\nbacking indices. 
Otherwise, it will be applied when the data stream is next rolled over.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-put-data-stream-settings", "parameters": [ { @@ -16508,7 +16510,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.", + "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-field-mapping", "parameters": [ { @@ -16547,7 +16549,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.", + "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-field-mapping-1", "parameters": [ { @@ -16589,7 +16591,7 @@ "indices" ], "summary": "Get index templates", - "description": "Get information about one or more index templates.", + "description": "Get information about one or more index templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-get-index-template", "parameters": [ { @@ -16619,7 +16621,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-mapping", "parameters": [ { @@ -16652,7 +16654,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-mapping-1", "parameters": [ { @@ -16686,7 +16688,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not 
have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-parameters" }, @@ -16735,7 +16737,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a 
multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-parameters" }, @@ -16870,7 +16872,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings", "parameters": [ { @@ -16907,7 +16909,7 @@ "indices" ], "summary": "Update index settings", - "description": "Changes 
dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. 
For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/index-settings/" }, @@ -16961,7 +16963,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings-1", "parameters": [ { @@ -17001,7 +17003,7 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. 
For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/index-settings/" }, @@ -17058,7 +17060,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings-2", "parameters": [ { @@ -17103,7 +17105,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data 
streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings-3", "parameters": [ { @@ -17145,7 +17147,7 @@ "indices" ], "summary": "Get legacy index templates", - "description": "Get information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "description": "Get information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/templates" }, @@ -17209,7 +17211,7 @@ "data stream" ], "summary": "Convert an index alias to a data stream", - "description": "Converts an index alias to a data stream.\nYou must have a matching index template that is data stream enabled.\nThe alias must meet the following criteria:\nThe alias must have a write index;\nAll indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type;\nThe alias must not have any filters;\nThe alias must not use custom routing.\nIf successful, the request removes the alias and creates a data stream with the same name.\nThe indices for the alias become hidden backing indices for the stream.\nThe write index for the alias becomes the write index for the stream.", + "description": "Converts an index alias to a data stream.\nYou must have a matching index template that is data stream enabled.\nThe alias must meet the following criteria:\nThe alias must have a write index;\nAll indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type;\nThe alias must not have any filters;\nThe alias must not use custom routing.\nIf successful, the request removes the alias and creates a data stream with the same name.\nThe indices for the alias become hidden backing indices for the stream.\nThe write index for the alias becomes the write index for the stream.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-migrate-to-data-stream", "parameters": [ { @@ -17310,7 +17312,7 @@ "indices" ], "summary": "Open a closed index", - "description": "For data streams, the API opens any closed backing indices.\n\nA closed index is blocked for read/write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nThis allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behavior can be turned off by using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or 
closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.\nThis setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.\n\nBecause opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.", + "description": "For data streams, the API opens any closed backing indices.\n\nA closed index is blocked for read/write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nThis allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behavior can be turned off by using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.\nThis setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.\n\nBecause opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-open", "parameters": [ { @@ -17470,7 +17472,7 @@ "indices" ], "summary": "Get index recovery information", - "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard 
recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", + "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-recovery", "parameters": [ { @@ -17500,7 +17502,7 @@ "indices" ], "summary": "Get index recovery information", - "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports 
information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", + "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-recovery-1", "parameters": [ { @@ -17533,7 +17535,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing 
indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh-1", "parameters": [ { @@ -17558,7 +17560,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh", "parameters": [ { @@ -17585,7 +17587,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on 
indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh-3", "parameters": [ { @@ -17613,7 +17615,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are 
resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh-2", "parameters": [ { @@ -17643,7 +17645,7 @@ "indices" ], "summary": "Reload search analyzers", - "description": "Reload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", + "description": "Reload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/search-analyzer" }, @@ -17683,7 +17685,7 @@ "indices" ], "summary": "Reload search analyzers", - "description": "Reload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request 
cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.", + "description": "Reload an index's search analyzers and their resources.\nFor data streams, the API reloads search analyzers and resources for the stream's backing indices.\n\nIMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.\n\nYou can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.\nTo be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.\n\nNOTE: This API does not perform a reload for each shard of an index.\nInstead, it performs a reload for each node containing index shards.\nAs a result, the total shard count returned by the API can differ from the number of index shards.\nBecause reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API.\nThis ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/search-analyzer" }, @@ -17725,7 +17727,7 @@ "indices" ], "summary": "Resolve the cluster", - "description": "Resolve the specified index expressions to return information about each cluster, including the local \"querying\" cluster, if included.\nIf no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster specified in the index expression. 
Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n## Note on backwards compatibility\nThe ability to query without an index expression was added in version 8.18, so when\nquerying remote clusters older than that, the local cluster will send the index\nexpression `dummy*` to those remote clusters. Thus, if an errors occur, you may see a reference\nto that index expression even though you didn't request it. If it causes a problem, you can\ninstead include an index expression like `*:*` to bypass the issue.\n\n## Advantages of using this endpoint before a cross-cluster search\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. 
(This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.\n\n## Test availability of remote clusters\n\nThe `remote/info` endpoint is commonly used to test whether the \"local\" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.\nThe remote cluster may be available, while the local cluster is not currently connected to it.\n\nYou can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.\nFor example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.\nThe `connected` field in the response will indicate whether it was successful.\nIf a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.", + "description": "Resolve the specified index expressions to return information about each cluster, including the local \"querying\" cluster, if included.\nIf no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n## Note on backwards compatibility\nThe ability to query without an index expression was added in version 8.18, so when\nquerying remote clusters older than that, the local cluster will send the index\nexpression `dummy*` to those remote clusters. Thus, if an error occurs, you may see a reference\nto that index expression even though you didn't request it. If it causes a problem, you can\ninstead include an index expression like `*:*` to bypass the issue.\n\n## Advantages of using this endpoint before a cross-cluster search\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). 
For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.\n\n## Test availability of remote clusters\n\nThe `remote/info` endpoint is commonly used to test whether the \"local\" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.\nThe remote cluster may be available, while the local cluster is not currently connected to it.\n\nYou can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.\nFor example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.\nThe `connected` field in the response will indicate whether it was successful.\nIf a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-resolve-cluster", "parameters": [ { @@ -17764,7 +17766,7 @@ "indices" ], "summary": "Resolve the cluster", - "description": "Resolve the specified index expressions to return information about each cluster, including the local \"querying\" cluster, if included.\nIf no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n## Note on backwards compatibility\nThe ability to query without an index expression was added in version 8.18, so when\nquerying remote clusters older than that, the local cluster will send the index\nexpression `dummy*` to those remote clusters. 
Thus, if any errors occur, you may see a reference\nto that index expression even though you didn't request it. If it causes a problem, you can\ninstead include an index expression like `*:*` to bypass the issue.\n\n## Advantages of using this endpoint before a cross-cluster search\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.\n\n## Test availability of remote clusters\n\nThe `remote/info` endpoint is commonly used to test whether the \"local\" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.\nThe remote cluster may be available, while the local cluster is not currently connected to it.\n\nYou can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.\nFor example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.\nThe `connected` field in the response will indicate whether it was successful.\nIf a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.", + "description": "Resolve the specified index expressions to return information about each cluster, including the local \"querying\" cluster, if included.\nIf no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster specified in the index expression. 
Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n## Note on backwards compatibility\nThe ability to query without an index expression was added in version 8.18, so when\nquerying remote clusters older than that, the local cluster will send the index\nexpression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference\nto that index expression even though you didn't request it. If it causes a problem, you can\ninstead include an index expression like `*:*` to bypass the issue.\n\n## Advantages of using this endpoint before a cross-cluster search\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. 
(This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.\n\n## Test availability of remote clusters\n\nThe `remote/info` endpoint is commonly used to test whether the \"local\" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.\nThe remote cluster may be available, while the local cluster is not currently connected to it.\n\nYou can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.\nFor example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.\nThe `connected` field in the response will indicate whether it was successful.\nIf a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-resolve-cluster-1", "parameters": [ { @@ -17806,7 +17808,7 @@ "indices" ], "summary": "Resolve indices", - "description": "Resolve the names and/or index patterns for indices, aliases, and data streams.\nMultiple patterns and remote clusters are supported.", + "description": "Resolve the names and/or index patterns for indices, aliases, and data streams.\nMultiple patterns and remote clusters are supported.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-resolve-index", "parameters": [ { @@ -17909,7 +17911,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to 
track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-rollover", "parameters": [ { @@ -17954,7 +17956,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe 
rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's 
name is `my-index-2099.05.07-000002`.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-rollover-1", "parameters": [ { @@ -18002,7 +18004,7 @@ "indices" ], "summary": "Get index segments", - "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-segments", "parameters": [ { @@ -18029,7 +18031,7 @@ "indices" ], "summary": "Get index segments", - "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the stream's backing indices.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-segments-1", "parameters": [ { @@ -18059,7 +18061,7 @@ "indices" ], "summary": "Get index shard stores", - "description": "Get store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", + "description": "Get store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-shard-stores", "parameters": [ { @@ -18095,7 +18097,7 @@ "indices" ], "summary": "Get index shard stores", - "description": "Get store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", + "description": "Get store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard 
exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-shard-stores-1", "parameters": [ { @@ -18134,7 +18136,7 @@ "indices" ], "summary": "Shrink an index", - "description": "Shrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard\n Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. 
The source index must have more primary shards than the target index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Shrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard\n Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. 
The source index must have more primary shards than the target index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-shrink", "parameters": [ { @@ -18174,7 +18176,7 @@ "indices" ], "summary": "Shrink an index", - "description": "Shrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard\n Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. 
The source index must have more primary shards than the target index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Shrink an index into a new index with fewer primary shards.\n\nBefore you can shrink an index:\n\n* The index must be read-only.\n* A copy of every shard in the index must reside on the same node.\n* The index must have a green health status.\n\nTo make shard allocation easier, we recommend you also remove the index's replica shards.\nYou can later re-add replica shards as part of the shrink operation.\n\nThe requested number of primary shards in the target index must be a factor of the number of shards in the source index.\nFor example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1.\nIf the number of shards in the index is a prime number it can only be shrunk into a single primary shard\n Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.\n\nThe current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.\n\nA shrink operation:\n\n* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.\n* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting.\n\nIMPORTANT: Indices can only be shrunk if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have more primary shards than the target index.\n* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. 
The source index must have more primary shards than the target index.\n* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.\n* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-shrink-1", "parameters": [ { @@ -18216,7 +18218,7 @@ "indices" ], "summary": "Simulate an index", - "description": "Get the index configuration that would be applied to the specified index from an existing index template.", + "description": "Get the index configuration that would be applied to the specified index from an existing index template.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-simulate-index-template", "parameters": [ { @@ -18318,7 +18320,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Get the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-simulate-template", "parameters": [ { @@ -18357,7 +18359,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Get the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-simulate-template-1", "parameters": [ { @@ -18399,7 +18401,7 @@ "indices" ], "summary": "Split an index", - "description": "Split an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. 
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Split an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. 
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-split", "parameters": [ { @@ -18439,7 +18441,7 @@ "indices" ], "summary": "Split an index", - "description": "Split an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. 
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Split an index into a new index with more primary shards.\n\nBefore you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. 
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-split-1", "parameters": [ { @@ -18481,7 +18483,7 @@ "indices" ], "summary": "Get index statistics", - "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-stats", "parameters": [ { @@ -18526,7 +18528,7 @@ "indices" ], "summary": "Get index statistics", - "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the 
node, that node retains any node-level statistics to which the shard contributed.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-stats-1", "parameters": [ { @@ -18574,7 +18576,7 @@ "indices" ], "summary": "Get index statistics", - "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-stats-2", "parameters": [ { @@ -18622,7 +18624,7 @@ "indices" ], "summary": "Get index statistics", - "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", + "description": "For data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.\n ##Required authorization\n* Index privileges: `monitor`", "operationId": "indices-stats-3", "parameters": [ { @@ -19166,7 +19168,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do 
not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put", "parameters": [ { @@ -19188,7 +19190,7 @@ "inference" ], "summary": "Perform inference on the service", - "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-inference", "parameters": [ { @@ -19260,7 +19262,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-1", "parameters": [ { @@ -19285,7 +19287,7 @@ "inference" ], "summary": "Perform inference on the service", - "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. 
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-inference-1", "parameters": [ { @@ -19357,7 +19359,7 @@ "inference" ], "summary": "Create an AlibabaCloud AI Search inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.", + "description": "Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-alibabacloud", "parameters": [ { @@ -19459,7 +19461,7 @@ "inference" ], "summary": "Create an Amazon Bedrock inference endpoint", - "description": "Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.", + "description": "Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. 
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-amazonbedrock", "parameters": [ { @@ -19551,7 +19553,7 @@ "inference" ], "summary": "Create an Anthropic inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `anthropic` service.", + "description": "Create an inference endpoint to perform an inference task with the `anthropic` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-anthropic", "parameters": [ { @@ -19637,7 +19639,7 @@ "inference" ], "summary": "Create an Azure AI studio inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `azureaistudio` service.", + "description": "Create an inference endpoint to perform an inference task with the `azureaistudio` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-azureaistudio", "parameters": [ { @@ -19729,7 +19731,7 @@ "inference" ], "summary": "Create an Azure OpenAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).", + "description": "Create an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-azureopenai", "parameters": [ { @@ -19821,7 +19823,7 @@ "inference" ], "summary": "Create a Cohere inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `cohere` service.", + "description": "Create an inference endpoint to perform an inference task with the `cohere` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-cohere", "parameters": [ { @@ -19913,7 +19915,7 @@ "inference" ], "summary": "Create an Elasticsearch inference endpoint", - 
"description": "Create an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-elasticsearch", "parameters": [ { @@ -20031,7 +20033,7 @@ "inference" ], "summary": "Create an ELSER inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. 
If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-elser", "parameters": [ { @@ -20127,7 +20129,7 @@ "inference" ], "summary": "Create an Google AI Studio inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `googleaistudio` service.", + "description": "Create an inference endpoint to perform an inference task with the `googleaistudio` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-googleaistudio", "parameters": [ { @@ -20211,7 +20213,7 @@ "inference" ], "summary": "Create a Google Vertex AI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `googlevertexai` service.", + "description": "Create an inference endpoint to perform an inference task with the `googlevertexai` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-googlevertexai", "parameters": [ { @@ -20303,7 +20305,7 @@ "inference" ], "summary": "Create a Hugging Face inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* 
`multilingual-e5-base`\n* `multilingual-e5-small`", + "description": "Create an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* `multilingual-e5-base`\n* `multilingual-e5-small`\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-hugging-face", "parameters": [ { @@ -20387,7 +20389,7 @@ "inference" ], "summary": "Create an JinaAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` models, refer to .\nTo review the available `text_embedding` models, refer to the .", + "description": "Create an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` models, refer to .\nTo review the available `text_embedding` models, refer to the .\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-jinaai", "parameters": [ { @@ -20479,7 +20481,7 @@ "inference" ], "summary": "Create a Mistral inference endpoint", - "description": "Creates an inference endpoint to perform an inference task with the `mistral` service.", + "description": "Creates an inference endpoint to perform an inference task with the `mistral` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-mistral", "parameters": [ { @@ -20562,7 +20564,7 @@ "inference" ], "summary": "Create an OpenAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.", + "description": "Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-openai", "parameters": [ { @@ -20654,7 +20656,7 @@ "inference" ], "summary": "Create a VoyageAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `voyageai` service.\n\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an inference endpoint to perform an inference task with the `voyageai` service.\n\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-voyageai", "parameters": [ { @@ -20746,7 +20748,7 @@ "inference" ], "summary": "Create a Watsonx inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud 
Databases API, or Terraform.", + "description": "Create an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-watsonx", "parameters": [ { @@ -20826,6 +20828,7 @@ "inference" ], "summary": "Perform rereanking inference on the service", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-rerank", "parameters": [ { @@ -21023,7 +21026,7 @@ "inference" ], "summary": "Perform streaming inference", - "description": "Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.\nThis API works only with the completion task type.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThis API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.", + "description": "Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.\nThis API works only with the completion task type.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThis API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
You must use a client that supports streaming.\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-stream-completion", "parameters": [ { @@ -21199,7 +21202,7 @@ "inference" ], "summary": "Update an inference endpoint", - "description": "Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-update", "parameters": [ { @@ -21223,7 +21226,7 @@ "inference" ], "summary": "Update an inference endpoint", - "description": "Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-update-1", "parameters": [ { @@ -21250,7 +21253,7 @@ 
"info" ], "summary": "Get cluster info", - "description": "Get basic build, version, and cluster information.", + "description": "Get basic build, version, and cluster information.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "info", "responses": { "200": { @@ -21477,6 +21480,7 @@ "ingest" ], "summary": "Get IP geolocation database configurations", + "description": "\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "ingest-get-ip-location-database-1", "parameters": [ { @@ -21498,6 +21502,7 @@ "ingest" ], "summary": "Create or update an IP geolocation database configuration", + "description": "\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "ingest-put-ip-location-database", "parameters": [ { @@ -21561,6 +21566,7 @@ "ingest" ], "summary": "Delete IP geolocation database configurations", + "description": "\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "ingest-delete-ip-location-database", "parameters": [ { @@ -21885,6 +21891,7 @@ "ingest" ], "summary": "Get IP geolocation database configurations", + "description": "\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "ingest-get-ip-location-database", "parameters": [ { @@ -21969,7 +21976,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate", "parameters": [ { @@ -21997,7 +22004,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate-1", "parameters": [ { @@ -22027,7 +22034,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate-2", "parameters": [ { @@ -22058,7 +22065,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either 
specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate-3", "parameters": [ { @@ -22154,7 +22161,7 @@ "license" ], "summary": "Update the license", - "description": "You can update your license at runtime without shutting down your nodes.\nLicense updates take effect immediately.\nIf the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.\nYou must then re-submit the API request with the acknowledge parameter set to true.\n\nNOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.\nIf the operator privileges feature is enabled, only operator users can use this API.", + "description": "You can update your license at runtime without shutting down your nodes.\nLicense updates take effect immediately.\nIf the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.\nYou must then re-submit the API request with the acknowledge parameter set to true.\n\nNOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.\nIf the operator privileges feature is enabled, only operator users can use this API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "license-post", "parameters": [ { @@ -22188,7 +22195,7 @@ "license" ], "summary": "Update the license", - "description": "You can update your license at runtime without shutting down your nodes.\nLicense updates take effect immediately.\nIf the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.\nYou must then re-submit the API request with the acknowledge parameter set to true.\n\nNOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.\nIf the operator privileges feature is enabled, only operator users can use this API.", + "description": "You can update your license at runtime without shutting down your nodes.\nLicense updates take effect immediately.\nIf the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response.\nYou must then re-submit the API request with the acknowledge parameter set to true.\n\nNOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license.\nIf the operator privileges feature is enabled, only operator users can use this API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "license-post-1", "parameters": [ { @@ -22222,7 +22229,7 @@ "license" ], "summary": "Delete the license", - "description": "When the license expires, your subscription level reverts to Basic.\n\nIf the operator privileges feature is enabled, only operator users can use this API.", + "description": "When the license expires, your subscription 
level reverts to Basic.\n\nIf the operator privileges feature is enabled, only operator users can use this API.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/license/manage-your-license-in-self-managed-cluster" }, @@ -22270,6 +22277,7 @@ "license" ], "summary": "Get the basic license status", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "license-get-basic-status", "responses": { "200": { @@ -22312,6 +22320,7 @@ "license" ], "summary": "Get the trial status", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "license-get-trial-status", "responses": { "200": { @@ -22354,7 +22363,7 @@ "license" ], "summary": "Start a basic license", - "description": "Start an indefinite basic license, which gives access to all the basic features.\n\nNOTE: In order to start a basic license, you must not currently have a basic license.\n\nIf the basic license does not support all of the features that are available with your current license, however, you are notified in the response.\nYou must then re-submit the API request with the `acknowledge` parameter set to `true`.\n\nTo check the status of your basic license, use the get basic license API.", + "description": "Start an indefinite basic license, which gives access to all the basic features.\n\nNOTE: In order to start a basic license, you must not currently have a basic license.\n\nIf the basic license does not support all of the features that are available with your current license, however, you are notified in the response.\nYou must then re-submit the API request with the `acknowledge` parameter set to `true`.\n\nTo check the status of your basic license, use the get basic license API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "license-post-start-basic", "parameters": [ { @@ -22455,7 +22464,7 @@ "license" ], "summary": "Start a trial", - "description": "Start a 30-day trial, which gives access to all subscription features.\n\nNOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version.\nFor example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.\n\nTo check the status of your trial, use the get trial status API.", + "description": "Start a 30-day trial, which gives access to all subscription features.\n\nNOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version.\nFor example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. 
You can, however, request an extended trial at https://www.elastic.co/trialextension.\n\nTo check the status of your trial, use the get trial status API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "license-post-start-trial", "parameters": [ { @@ -22539,7 +22548,7 @@ "logstash" ], "summary": "Get Logstash pipelines", - "description": "Get pipelines that are used for Logstash Central Management.", + "description": "Get pipelines that are used for Logstash Central Management.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -22567,7 +22576,7 @@ "logstash" ], "summary": "Create or update a Logstash pipeline", - "description": "Create a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.", + "description": "Create a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -22623,7 +22632,7 @@ "logstash" ], "summary": "Delete a Logstash pipeline", - "description": "Delete a pipeline that is used for Logstash Central Management.\nIf the request succeeds, you receive an empty response with an appropriate status code.", + "description": "Delete a pipeline that is used for Logstash Central Management.\nIf the request succeeds, you receive an empty response with an appropriate status code.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -22658,7 +22667,7 @@ "logstash" ], "summary": "Get Logstash pipelines", - "description": "Get pipelines that are used for Logstash Central Management.", + "description": "Get pipelines that are used for Logstash Central Management.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -22683,7 +22692,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the 
request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget", "parameters": [ { @@ -22732,7 +22741,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget-1", "parameters": [ { @@ -22783,7 +22792,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in 
the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget-2", "parameters": [ { @@ -22835,7 +22844,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if 
stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget-3", "parameters": [ { @@ -22889,7 +22898,7 @@ "migration" ], "summary": "Get deprecation information", - "description": "Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.\n\nTIP: This APIs is designed for indirect use by the Upgrade Assistant.\nYou are strongly recommended to use the Upgrade Assistant.", + "description": "Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.\n\nTIP: This APIs is designed for indirect use by the Upgrade Assistant.\nYou are strongly recommended to use the Upgrade Assistant.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "migration-deprecations", "responses": { "200": { @@ -22911,7 +22920,7 @@ "migration" ], "summary": "Get deprecation information", - "description": "Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.\n\nTIP: This APIs is designed for indirect use by the Upgrade Assistant.\nYou are strongly recommended to use the Upgrade Assistant.", + "description": "Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.\n\nTIP: This APIs is designed for indirect use by the Upgrade Assistant.\nYou are strongly recommended to use the Upgrade Assistant.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "migration-deprecations-1", "parameters": [ { @@ -22938,7 +22947,7 @@ "migration" ], "summary": "Get feature migration information", - "description": "Version upgrades sometimes require changes to how features store configuration information and data in system indices.\nCheck which features need to be migrated and the status of any migrations that are in progress.\n\nTIP: This API is designed for indirect use by the Upgrade Assistant.\nYou are strongly recommended to use the Upgrade Assistant.", + "description": "Version upgrades sometimes require changes to how features store configuration information and data in system indices.\nCheck which features need to be migrated and the status of any migrations that are in progress.\n\nTIP: This API is designed for indirect use by the Upgrade Assistant.\nYou are strongly recommended to use the Upgrade Assistant.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "operationId": "migration-get-feature-upgrade-status", "responses": { "200": { @@ -22986,7 +22995,7 @@ "migration" ], "summary": "Start the feature migration", - "description": "Version upgrades sometimes require changes to how 
features store configuration information and data in system indices.\nThis API starts the automatic migration process.\n\nSome functionality might be temporarily unavailable during the migration process.\n\nTIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.", + "description": "Version upgrades sometimes require changes to how features store configuration information and data in system indices.\nThis API starts the automatic migration process.\n\nSome functionality might be temporarily unavailable during the migration process.\n\nTIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "operationId": "migration-post-feature-upgrade", "responses": { "200": { @@ -23038,7 +23047,7 @@ "ml trained model" ], "summary": "Clear trained model deployment cache", - "description": "Cache will be cleared on all nodes where the trained model is assigned.\nA trained model deployment may have an inference cache enabled.\nAs requests are handled by each allocated node, their responses may be cached on that individual node.\nCalling this API clears the caches without restarting the deployment.", + "description": "Cache will be cleared on all nodes where the trained model is assigned.\nA trained model deployment may have an inference cache enabled.\nAs requests are handled by each allocated node, their responses may be cached on that individual node.\nCalling this API clears the caches without restarting the deployment.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-clear-trained-model-deployment-cache", "parameters": [ { @@ -23088,7 +23097,7 @@ "ml anomaly" ], "summary": "Close anomaly detection jobs", - "description": "A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.", + "description": "A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. 
Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-close-job", "parameters": [ { @@ -23190,6 +23199,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars-2", "parameters": [ { @@ -23217,6 +23227,7 @@ "ml anomaly" ], "summary": "Create a calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-calendar", "parameters": [ { @@ -23288,6 +23299,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars-3", "parameters": [ { @@ -23315,7 +23327,7 @@ "ml anomaly" ], "summary": "Delete a calendar", - "description": "Remove all scheduled events from a calendar, then delete it.", + "description": "Remove all scheduled events from a calendar, then delete it.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-calendar", "parameters": [ { @@ -23409,6 +23421,7 @@ "ml anomaly" ], "summary": "Add anomaly detection job to calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-calendar-job", "parameters": [ { @@ -23469,6 +23482,7 @@ "ml anomaly" ], "summary": "Delete anomaly jobs from a calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-calendar-job", "parameters": [ { @@ -23537,7 +23551,7 @@ "ml data frame" ], "summary": "Get data frame analytics job configuration info", - "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.", + "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics", "parameters": [ { @@ -23568,7 +23582,7 @@ "ml data frame" ], "summary": "Create a data frame analytics job", - "description": "This API creates a data frame analytics job that performs an analysis on the\nsource indices and stores the outcome in a destination index.\nBy default, the query used in the source configuration is `{\"match_all\": {}}`.\n\nIf the destination index does not exist, it is created automatically when you start the job.\n\nIf you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. 
It determines a value for each of the undefined parameters.", + "description": "This API creates a data frame analytics job that performs an analysis on the\nsource indices and stores the outcome in a destination index.\nBy default, the query used in the source configuration is `{\"match_all\": {}}`.\n\nIf the destination index does not exist, it is created automatically when you start the job.\n\nIf you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.\n ##Required authorization\n* Index privileges: `create_index`,`index`,`manage`,`read`,`view_index_metadata`* Cluster privileges: `manage_ml`", "operationId": "ml-put-data-frame-analytics", "parameters": [ { @@ -23708,6 +23722,7 @@ "ml data frame" ], "summary": "Delete a data frame analytics job", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-data-frame-analytics", "parameters": [ { @@ -23769,7 +23784,7 @@ "ml anomaly" ], "summary": "Get datafeeds configuration info", - "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeeds", "parameters": [ { @@ -23794,7 +23809,7 @@ "ml anomaly" ], "summary": "Create a datafeed", - "description": "Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\nYou can associate only one datafeed with each anomaly detection job.\nThe datafeed contains a query that runs at a defined interval (`frequency`).\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\nBy default, the datafeed uses the following query: `{\"match_all\": {\"boost\": 1}}`.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\nat the time of creation and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\ndirectly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.", + "description": "Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\nYou can associate only one datafeed with each anomaly detection job.\nThe datafeed contains a query that runs at a defined interval (`frequency`).\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\nBy default, the datafeed uses the following query: `{\"match_all\": {\"boost\": 1}}`.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\nat the time of creation and runs the query using those same roles. 
If you provide secondary authorization headers,\nthose credentials are used instead.\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\ndirectly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-put-datafeed", "parameters": [ { @@ -23997,6 +24012,7 @@ "ml anomaly" ], "summary": "Delete a datafeed", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-datafeed", "parameters": [ { @@ -24048,7 +24064,7 @@ "ml anomaly" ], "summary": "Delete expired ML data", - "description": "Delete all job results, model snapshots and forecast data that have exceeded\ntheir retention days period. Machine learning state documents that are not\nassociated with any job are also deleted.\nYou can limit the request to a single or set of anomaly detection jobs by\nusing a job identifier, a group name, a comma-separated list of jobs, or a\nwildcard expression. You can delete expired data for all anomaly detection\njobs by using `_all`, by specifying `*` as the ``, or by omitting the\n``.", + "description": "Delete all job results, model snapshots and forecast data that have exceeded\ntheir retention days period. Machine learning state documents that are not\nassociated with any job are also deleted.\nYou can limit the request to a single or set of anomaly detection jobs by\nusing a job identifier, a group name, a comma-separated list of jobs, or a\nwildcard expression. You can delete expired data for all anomaly detection\njobs by using `_all`, by specifying `*` as the ``, or by omitting the\n``.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-expired-data", "parameters": [ { @@ -24078,7 +24094,7 @@ "ml anomaly" ], "summary": "Delete expired ML data", - "description": "Delete all job results, model snapshots and forecast data that have exceeded\ntheir retention days period. Machine learning state documents that are not\nassociated with any job are also deleted.\nYou can limit the request to a single or set of anomaly detection jobs by\nusing a job identifier, a group name, a comma-separated list of jobs, or a\nwildcard expression. You can delete expired data for all anomaly detection\njobs by using `_all`, by specifying `*` as the ``, or by omitting the\n``.", + "description": "Delete all job results, model snapshots and forecast data that have exceeded\ntheir retention days period. Machine learning state documents that are not\nassociated with any job are also deleted.\nYou can limit the request to a single or set of anomaly detection jobs by\nusing a job identifier, a group name, a comma-separated list of jobs, or a\nwildcard expression. 
You can delete expired data for all anomaly detection\njobs by using `_all`, by specifying `*` as the\n``, or by omitting the\n``.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-expired-data-1", "parameters": [ { @@ -24105,7 +24121,7 @@ "ml anomaly" ], "summary": "Get filters", - "description": "You can get a single filter or all filters.", + "description": "You can get a single filter or all filters.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-get-filters-1", "parameters": [ { @@ -24130,7 +24146,7 @@ "ml anomaly" ], "summary": "Create a filter", - "description": "A filter contains a list of strings. It can be used by one or more anomaly detection jobs.\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.", + "description": "A filter contains a list of strings. It can be used by one or more anomaly detection jobs.\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-filter", "parameters": [ { @@ -24206,7 +24222,7 @@ "ml anomaly" ], "summary": "Delete a filter", - "description": "If an anomaly detection job references the filter, you cannot delete the\nfilter. You must update or delete the job before you can delete the filter.", + "description": "If an anomaly detection job references the filter, you cannot delete the\nfilter. You must update or delete the job before you can delete the filter.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-filter", "parameters": [ { @@ -24248,7 +24264,7 @@ "ml anomaly" ], "summary": "Predict future behavior of a time series", - "description": "Forecasts are not supported for jobs that perform population analysis; an\nerror occurs if you try to create a forecast for a job that has an\n`over_field_name` in its configuration. Forecasts predict future behavior\nbased on historical data.", + "description": "Forecasts are not supported for jobs that perform population analysis; an\nerror occurs if you try to create a forecast for a job that has an\n`over_field_name` in its configuration. Forecasts predict future behavior\nbased on historical data.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-forecast", "parameters": [ { @@ -24345,7 +24361,7 @@ "ml anomaly" ], "summary": "Delete forecasts from a job", - "description": "By default, forecasts are retained for 14 days. You can specify a\ndifferent retention period with the `expires_in` parameter in the forecast\njobs API. The delete forecast API enables you to delete one or more\nforecasts before they expire.", + "description": "By default, forecasts are retained for 14 days. You can specify a\ndifferent retention period with the `expires_in` parameter in the forecast\njobs API. 
The delete forecast API enables you to delete one or more\nforecasts before they expire.", + "description": "By default, forecasts are retained for 14 days. You can specify a\ndifferent retention period with the `expires_in` parameter in the forecast\njobs API. The delete forecast API enables you to delete one or more\nforecasts before they expire.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-forecast-1", "parameters": [ { @@ -24402,7 +24418,7 @@ "ml anomaly" ], "summary": "Get anomaly detection jobs configuration info", - "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-jobs", "parameters": [ { @@ -24427,7 +24443,7 @@ "ml anomaly" ], "summary": "Create an anomaly detection job", - "description": "If you include a `datafeed_config`, you must have read index privileges on the source index.\nIf you include a `datafeed_config` but do not provide a query, the datafeed uses `{\"match_all\": {\"boost\": 1}}`.", + "description": "If you include a `datafeed_config`, you must have read index privileges on the source index.\nIf you include a `datafeed_config` but do not provide a query, the datafeed uses `{\"match_all\": {\"boost\": 1}}`.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-put-job", "parameters": [ { @@ -24665,7 +24681,7 @@ "ml anomaly" ], "summary": "Delete an anomaly detection job", - "description": "All job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.", + "description": "All job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. 
This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-job", "parameters": [ { @@ -24743,6 +24759,7 @@ "ml anomaly" ], "summary": "Get model snapshots info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-model-snapshots", "parameters": [ { @@ -24785,6 +24802,7 @@ "ml anomaly" ], "summary": "Get model snapshots info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-model-snapshots-1", "parameters": [ { @@ -24827,7 +24845,7 @@ "ml anomaly" ], "summary": "Delete a model snapshot", - "description": "You cannot delete the active model snapshot. To delete that snapshot, first\nrevert to a different one. To identify the active model snapshot, refer to\nthe `model_snapshot_id` in the results from the get jobs API.", + "description": "You cannot delete the active model snapshot. To delete that snapshot, first\nrevert to a different one. To identify the active model snapshot, refer to\nthe `model_snapshot_id` in the results from the get jobs API.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-model-snapshot", "parameters": [ { @@ -24880,6 +24898,7 @@ "ml trained model" ], "summary": "Get trained model configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models", "parameters": [ { @@ -24919,7 +24938,7 @@ "ml trained model" ], "summary": "Create a trained model", - "description": "Enable you to supply a trained model that is not created by data frame analytics.", + "description": "Enable you to supply a trained model that is not created by data frame analytics.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model", "parameters": [ { @@ -25027,7 +25046,7 @@ "ml trained model" ], "summary": "Delete an unreferenced trained model", - "description": "The request deletes a trained inference model that is not referenced by an ingest pipeline.", + "description": "The request deletes a trained inference model that is not referenced by an ingest pipeline.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-trained-model", "parameters": [ { @@ -25089,7 +25108,7 @@ "ml trained model" ], "summary": "Create or update a trained model alias", - "description": "A trained model alias is a logical name used to reference a single trained\nmodel.\nYou can use aliases instead of trained model identifiers to make it easier to\nreference your models. For example, you can use aliases in inference\naggregations and processors.\nAn alias must be unique and refer to only a single trained model. However,\nyou can have multiple aliases for each trained model.\nIf you use this API to update an alias such that it references a different\ntrained model ID and the model uses a different type of data frame analytics,\nan error occurs. 
For example, this situation occurs if you have a trained\nmodel for regression analysis and a trained model for classification\nanalysis; you cannot reassign an alias from one type of trained model to\nanother.\nIf you use this API to update an alias and there are very few input fields in\ncommon between the old and new trained models for the model alias, the API\nreturns a warning.", + "description": "A trained model alias is a logical name used to reference a single trained\nmodel.\nYou can use aliases instead of trained model identifiers to make it easier to\nreference your models. For example, you can use aliases in inference\naggregations and processors.\nAn alias must be unique and refer to only a single trained model. However,\nyou can have multiple aliases for each trained model.\nIf you use this API to update an alias such that it references a different\ntrained model ID and the model uses a different type of data frame analytics,\nan error occurs. For example, this situation occurs if you have a trained\nmodel for regression analysis and a trained model for classification\nanalysis; you cannot reassign an alias from one type of trained model to\nanother.\nIf you use this API to update an alias and there are very few input fields in\ncommon between the old and new trained models for the model alias, the API\nreturns a warning.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model-alias", "parameters": [ { @@ -25144,7 +25163,7 @@ "ml trained model" ], "summary": "Delete a trained model alias", - "description": "This API deletes an existing model alias that refers to a trained model. If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.", + "description": "This API deletes an existing model alias that refers to a trained model. If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-trained-model-alias", "parameters": [ { @@ -25197,7 +25216,7 @@ "ml anomaly" ], "summary": "Estimate job model memory usage", - "description": "Make an estimation of the memory usage for an anomaly detection job model.\nThe estimate is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.", + "description": "Make an estimation of the memory usage for an anomaly detection job model.\nThe estimate is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-estimate-model-memory", "requestBody": { "content": { @@ -25275,7 +25294,7 @@ "ml data frame" ], "summary": "Evaluate data frame analytics", - "description": "The API packages together commonly used evaluation metrics for various types\nof machine learning features. This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.", + "description": "The API packages together commonly used evaluation metrics for various types\nof machine learning features. This has been designed for use on indexes\ncreated by data frame analytics. 
Evaluation requires both a ground truth\nfield and an analytics result field to be present.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-evaluate-data-frame", "requestBody": { "content": { @@ -25384,7 +25403,7 @@ "ml data frame" ], "summary": "Explain data frame analytics config", - "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", + "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-explain-data-frame-analytics", "requestBody": { "$ref": "#/components/requestBodies/ml.explain_data_frame_analytics" @@ -25407,7 +25426,7 @@ "ml data frame" ], "summary": "Explain data frame analytics config", - "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", + "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-explain-data-frame-analytics-1", "requestBody": { "$ref": "#/components/requestBodies/ml.explain_data_frame_analytics" @@ -25432,7 +25451,7 @@ "ml data frame" ], "summary": "Explain data frame analytics config", - "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. 
The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", + "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-explain-data-frame-analytics-2", "parameters": [ { @@ -25460,7 +25479,7 @@ "ml data frame" ], "summary": "Explain data frame analytics config", - "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.", + "description": "This API provides explanations for a data frame analytics config that either\nexists already or one that has not been created yet. The following\nexplanations are provided:\n* which fields are included or not in the analysis and why,\n* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on.\nIf you have object fields or fields that are excluded via source filtering, they are not included in the explanation.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-explain-data-frame-analytics-3", "parameters": [ { @@ -25490,7 +25509,7 @@ "ml anomaly" ], "summary": "Force buffered data to be processed", - "description": "The flush jobs API is only applicable when sending data for analysis using\nthe post data API. Depending on the content of the buffer, then it might\nadditionally calculate new results. Both flush and close operations are\nsimilar, however the flush is more efficient if you are expecting to send\nmore data for analysis. When flushing, the job remains open and is available\nto continue analyzing data. A close operation additionally prunes and\npersists the model state to disk and the job must be opened again before\nanalyzing further data.", + "description": "The flush jobs API is only applicable when sending data for analysis using\nthe post data API. Depending on the content of the buffer, then it might\nadditionally calculate new results. Both flush and close operations are\nsimilar, however the flush is more efficient if you are expecting to send\nmore data for analysis. When flushing, the job remains open and is available\nto continue analyzing data. 
A close operation additionally prunes and\npersists the model state to disk and the job must be opened again before\nanalyzing further data.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-flush-job", "parameters": [ { @@ -25616,7 +25635,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for buckets", - "description": "The API presents a chronological view of the records, grouped by bucket.", + "description": "The API presents a chronological view of the records, grouped by bucket.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-buckets", "parameters": [ { @@ -25668,7 +25687,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for buckets", - "description": "The API presents a chronological view of the records, grouped by bucket.", + "description": "The API presents a chronological view of the records, grouped by bucket.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-buckets-1", "parameters": [ { @@ -25722,7 +25741,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for buckets", - "description": "The API presents a chronological view of the records, grouped by bucket.", + "description": "The API presents a chronological view of the records, grouped by bucket.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-buckets-2", "parameters": [ { @@ -25771,7 +25790,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for buckets", - "description": "The API presents a chronological view of the records, grouped by bucket.", + "description": "The API presents a chronological view of the records, grouped by bucket.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-buckets-3", "parameters": [ { @@ -25822,6 +25841,7 @@ "ml anomaly" ], "summary": "Get info about events in calendars", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendar-events", "parameters": [ { @@ -25920,6 +25940,7 @@ "ml anomaly" ], "summary": "Add scheduled events to the calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-post-calendar-events", "parameters": [ { @@ -25988,6 +26009,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars", "parameters": [ { @@ -26012,6 +26034,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars-1", "parameters": [ { @@ -26038,6 +26061,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for categories", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-categories", "parameters": [ { @@ -26071,6 +26095,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for categories", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-categories-1", "parameters": [ { @@ -26106,6 +26131,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for categories", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-categories-2", "parameters": [ { @@ -26136,6 +26162,7 @@ "ml anomaly" ], "summary": 
"Get anomaly detection job results for categories", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-categories-3", "parameters": [ { @@ -26168,7 +26195,7 @@ "ml data frame" ], "summary": "Get data frame analytics job configuration info", - "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.", + "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics-1", "parameters": [ { @@ -26198,6 +26225,7 @@ "ml data frame" ], "summary": "Get data frame analytics job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics-stats", "parameters": [ { @@ -26227,6 +26255,7 @@ "ml data frame" ], "summary": "Get data frame analytics job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics-stats-1", "parameters": [ { @@ -26259,7 +26288,7 @@ "ml anomaly" ], "summary": "Get datafeed stats", - "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeed-stats", "parameters": [ { @@ -26283,7 +26312,7 @@ "ml anomaly" ], "summary": "Get datafeed stats", - "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. 
If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeed-stats-1", "parameters": [ { @@ -26304,7 +26333,7 @@ "ml anomaly" ], "summary": "Get datafeeds configuration info", - "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeeds-1", "parameters": [ { @@ -26328,7 +26357,7 @@ "ml anomaly" ], "summary": "Get filters", - "description": "You can get a single filter or all filters.", + "description": "You can get a single filter or all filters.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-get-filters", "parameters": [ { @@ -26352,7 +26381,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for influencers", - "description": "Influencers are the entities that have contributed to, or are to blame for,\nthe anomalies. Influencer results are available only if an\n`influencer_field_name` is specified in the job configuration.", + "description": "Influencers are the entities that have contributed to, or are to blame for,\nthe anomalies. Influencer results are available only if an\n`influencer_field_name` is specified in the job configuration.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-influencers", "parameters": [ { @@ -26398,7 +26427,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job results for influencers", - "description": "Influencers are the entities that have contributed to, or are to blame for,\nthe anomalies. Influencer results are available only if an\n`influencer_field_name` is specified in the job configuration.", + "description": "Influencers are the entities that have contributed to, or are to blame for,\nthe anomalies. Influencer results are available only if an\n`influencer_field_name` is specified in the job configuration.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-influencers-1", "parameters": [ { @@ -26446,6 +26475,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-job-stats", "parameters": [ { @@ -26466,6 +26496,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-job-stats-1", "parameters": [ { @@ -26489,7 +26520,7 @@ "ml anomaly" ], "summary": "Get anomaly detection jobs configuration info", - "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. 
You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-jobs-1", "parameters": [ { @@ -26513,7 +26544,7 @@ "ml" ], "summary": "Get machine learning memory usage info", - "description": "Get information about how machine learning jobs and trained models are using memory,\non each node, both within the JVM heap, and natively, outside of the JVM.", + "description": "Get information about how machine learning jobs and trained models are using memory,\non each node, both within the JVM heap, and natively, outside of the JVM.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-memory-stats", "parameters": [ { @@ -26537,7 +26568,7 @@ "ml" ], "summary": "Get machine learning memory usage info", - "description": "Get information about how machine learning jobs and trained models are using memory,\non each node, both within the JVM heap, and natively, outside of the JVM.", + "description": "Get information about how machine learning jobs and trained models are using memory,\non each node, both within the JVM heap, and natively, outside of the JVM.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-memory-stats-1", "parameters": [ { @@ -26564,6 +26595,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job model snapshot upgrade usage info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-model-snapshot-upgrade-stats", "parameters": [ { @@ -26635,6 +26667,7 @@ "ml anomaly" ], "summary": "Get model snapshots info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-model-snapshots-2", "parameters": [ { @@ -26674,6 +26707,7 @@ "ml anomaly" ], "summary": "Get model snapshots info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-model-snapshots-3", "parameters": [ { @@ -26715,7 +26749,7 @@ "ml anomaly" ], "summary": "Get overall bucket results", - "description": "Retrieves overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. 
If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.", + "description": "Retrieves overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-overall-buckets", "parameters": [ { @@ -26758,7 +26792,7 @@ "ml anomaly" ], "summary": "Get overall bucket results", - "description": "Retrieves overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. 
If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-overall-buckets-1", "parameters": [ { @@ -26803,7 +26837,7 @@ "ml anomaly" ], "summary": "Get anomaly records for an anomaly detection job", - "description": "Records contain the detailed analytical results. They describe the anomalous\nactivity that has been identified in the input data based on the detector\nconfiguration.\nThere can be many anomaly records depending on the characteristics and size\nof the input data. In practice, there are often too many to be able to\nmanually process them. The machine learning features therefore perform a\nsophisticated aggregation of the anomaly records into buckets.\nThe number of record results depends on the number of anomalies found in each\nbucket, which relates to the number of time series being modeled and the\nnumber of detectors.", + "description": "Records contain the detailed analytical results. They describe the anomalous\nactivity that has been identified in the input data based on the detector\nconfiguration.\nThere can be many anomaly records depending on the characteristics and size\nof the input data. In practice, there are often too many to be able to\nmanually process them. The machine learning features therefore perform a\nsophisticated aggregation of the anomaly records into buckets.\nThe number of record results depends on the number of anomalies found in each\nbucket, which relates to the number of time series being modeled and the\nnumber of detectors.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-records", "parameters": [ { @@ -26849,7 +26883,7 @@ "ml anomaly" ], "summary": "Get anomaly records for an anomaly detection job", - "description": "Records contain the detailed analytical results. They describe the anomalous\nactivity that has been identified in the input data based on the detector\nconfiguration.\nThere can be many anomaly records depending on the characteristics and size\nof the input data. In practice, there are often too many to be able to\nmanually process them. The machine learning features therefore perform a\nsophisticated aggregation of the anomaly records into buckets.\nThe number of record results depends on the number of anomalies found in each\nbucket, which relates to the number of time series being modeled and the\nnumber of detectors.", + "description": "Records contain the detailed analytical results. They describe the anomalous\nactivity that has been identified in the input data based on the detector\nconfiguration.\nThere can be many anomaly records depending on the characteristics and size\nof the input data. In practice, there are often too many to be able to\nmanually process them. 
The machine learning features therefore perform a\nsophisticated aggregation of the anomaly records into buckets.\nThe number of record results depends on the number of anomalies found in each\nbucket, which relates to the number of time series being modeled and the\nnumber of detectors.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-records-1", "parameters": [ { @@ -26897,6 +26931,7 @@ "ml trained model" ], "summary": "Get trained model configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models-1", "parameters": [ { @@ -26935,7 +26970,7 @@ "ml trained model" ], "summary": "Get trained models usage info", - "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", + "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models-stats", "parameters": [ { @@ -26965,7 +27000,7 @@ "ml trained model" ], "summary": "Get trained models usage info", - "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", + "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models-stats-1", "parameters": [ { @@ -27076,7 +27111,7 @@ "ml" ], "summary": "Get machine learning information", - "description": "Get defaults and limits used by machine learning.\nThis endpoint is designed to be used by a user interface that needs to fully\nunderstand machine learning configurations where some options are not\nspecified, meaning that the defaults should be used. This endpoint may be\nused to find out what those defaults are. It also provides information about\nthe maximum size of machine learning jobs that could run in the current\ncluster configuration.", + "description": "Get defaults and limits used by machine learning.\nThis endpoint is designed to be used by a user interface that needs to fully\nunderstand machine learning configurations where some options are not\nspecified, meaning that the defaults should be used. This endpoint may be\nused to find out what those defaults are. It also provides information about\nthe maximum size of machine learning jobs that could run in the current\ncluster configuration.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-info", "responses": { "200": { @@ -27119,7 +27154,7 @@ "ml anomaly" ], "summary": "Open anomaly detection jobs", - "description": "An anomaly detection job must be opened to be ready to receive and analyze\ndata. It can be opened and closed multiple times throughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. The job is ready to resume its analysis from where it left off, once\nnew data is received.", + "description": "An anomaly detection job must be opened to be ready to receive and analyze\ndata. 
It can be opened and closed multiple times throughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. The job is ready to resume its analysis from where it left off, once\nnew data is received.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-open-job", "parameters": [ { @@ -27203,7 +27238,7 @@ "ml anomaly" ], "summary": "Send data to an anomaly detection job for analysis", - "description": "IMPORTANT: For each job, data can be accepted from only a single connection at a time.\nIt is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.", + "description": "IMPORTANT: For each job, data can be accepted from only a single connection at a time.\nIt is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-post-data", "parameters": [ { @@ -27343,7 +27378,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics", "requestBody": { "$ref": "#/components/requestBodies/ml.preview_data_frame_analytics" @@ -27360,7 +27395,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics-1", "requestBody": { "$ref": "#/components/requestBodies/ml.preview_data_frame_analytics" @@ -27379,7 +27414,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics-2", "parameters": [ { @@ -27401,7 +27436,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics-3", "parameters": [ { @@ -27425,7 +27460,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. 
To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed", "parameters": [ { @@ -27453,7 +27488,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed-1", "parameters": [ { @@ -27483,7 +27518,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. 
To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed-2", "parameters": [ { @@ -27508,7 +27543,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. 
To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed-3", "parameters": [ { @@ -27535,6 +27570,7 @@ "ml trained model" ], "summary": "Create part of a trained model definition", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model-definition-part", "parameters": [ { @@ -27610,7 +27646,7 @@ "ml trained model" ], "summary": "Create a trained model vocabulary", - "description": "This API is supported only for natural language processing (NLP) models.\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.", + "description": "This API is supported only for natural language processing (NLP) models.\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model-vocabulary", "parameters": [ { @@ -27684,7 +27720,7 @@ "ml anomaly" ], "summary": "Reset an anomaly detection job", - "description": "All model state and results are deleted. The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.", + "description": "All model state and results are deleted. The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-reset-job", "parameters": [ { @@ -27740,7 +27776,7 @@ "ml anomaly" ], "summary": "Revert to a snapshot", - "description": "The machine learning features react quickly to anomalous input, learning new\nbehaviors in data. Highly anomalous input increases the variance in the\nmodels whilst the system learns whether this is a new step-change in behavior\nor a one-off event. In the case where this anomalous input is known to be a\none-off, then it might be appropriate to reset the model state to a time\nbefore this event. For example, you might consider reverting to a saved\nsnapshot after Black Friday or a critical system failure.", + "description": "The machine learning features react quickly to anomalous input, learning new\nbehaviors in data. Highly anomalous input increases the variance in the\nmodels whilst the system learns whether this is a new step-change in behavior\nor a one-off event. In the case where this anomalous input is known to be a\none-off, then it might be appropriate to reset the model state to a time\nbefore this event. For example, you might consider reverting to a saved\nsnapshot after Black Friday or a critical system failure.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-revert-model-snapshot", "parameters": [ { @@ -27820,7 +27856,7 @@ "ml" ], "summary": "Set upgrade_mode for ML indices", - "description": "Sets a cluster wide upgrade_mode setting that prepares machine learning\nindices for an upgrade.\nWhen upgrading your cluster, in some circumstances you must restart your\nnodes and reindex your machine learning indices. In those circumstances,\nthere must be no machine learning jobs running. 
You can close the machine\nlearning jobs, do the upgrade, then open all the jobs again. Alternatively,\nyou can use this API to temporarily halt tasks associated with the jobs and\ndatafeeds and prevent new jobs from opening. You can also use this API\nduring upgrades that do not require you to reindex your machine learning\nindices, though stopping jobs is not a requirement in that case.\nYou can see the current value for the upgrade_mode setting by using the get\nmachine learning info API.", + "description": "Sets a cluster wide upgrade_mode setting that prepares machine learning\nindices for an upgrade.\nWhen upgrading your cluster, in some circumstances you must restart your\nnodes and reindex your machine learning indices. In those circumstances,\nthere must be no machine learning jobs running. You can close the machine\nlearning jobs, do the upgrade, then open all the jobs again. Alternatively,\nyou can use this API to temporarily halt tasks associated with the jobs and\ndatafeeds and prevent new jobs from opening. You can also use this API\nduring upgrades that do not require you to reindex your machine learning\nindices, though stopping jobs is not a requirement in that case.\nYou can see the current value for the upgrade_mode setting by using the get\nmachine learning info API.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-set-upgrade-mode", "parameters": [ { @@ -27865,7 +27901,7 @@ "ml data frame" ], "summary": "Start a data frame analytics job", - "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\nIf the destination index does not exist, it is created automatically the\nfirst time you start the data frame analytics job. The\n`index.number_of_shards` and `index.number_of_replicas` settings for the\ndestination index are copied from the source index. If there are multiple\nsource indices, the destination index copies the highest setting values. The\nmappings for the destination index are also copied from the source indices.\nIf there are any mapping conflicts, the job fails to start.\nIf the destination index exists, it is used as is. You can therefore set up\nthe destination index in advance with custom settings and mappings.", + "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\nIf the destination index does not exist, it is created automatically the\nfirst time you start the data frame analytics job. The\n`index.number_of_shards` and `index.number_of_replicas` settings for the\ndestination index are copied from the source index. If there are multiple\nsource indices, the destination index copies the highest setting values. The\nmappings for the destination index are also copied from the source indices.\nIf there are any mapping conflicts, the job fails to start.\nIf the destination index exists, it is used as is. You can therefore set up\nthe destination index in advance with custom settings and mappings.\n ##Required authorization\n* Index privileges: `create_index`,`index`,`manage`,`read`,`view_index_metadata`* Cluster privileges: `manage_ml`", "operationId": "ml-start-data-frame-analytics", "parameters": [ { @@ -27923,7 +27959,7 @@ "ml anomaly" ], "summary": "Start datafeeds", - "description": "A datafeed must be started in order to retrieve data from Elasticsearch. 
A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n\nBefore you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.\n\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\nupdate it had at the time of creation or update and runs the query using those same roles. If you provided secondary\nauthorization headers when you created or updated the datafeed, those credentials are used instead.", + "description": "A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n\nBefore you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.\n\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\nupdate it had at the time of creation or update and runs the query using those same roles. If you provided secondary\nauthorization headers when you created or updated the datafeed, those credentials are used instead.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-start-datafeed", "parameters": [ { @@ -28022,7 +28058,7 @@ "ml trained model" ], "summary": "Start a trained model deployment", - "description": "It allocates the model to every machine learning node.", + "description": "It allocates the model to every machine learning node.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-start-trained-model-deployment", "parameters": [ { @@ -28160,7 +28196,7 @@ "ml data frame" ], "summary": "Stop data frame analytics jobs", - "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.", + "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-stop-data-frame-analytics", "parameters": [ { @@ -28234,7 +28270,7 @@ "ml anomaly" ], "summary": "Stop datafeeds", - "description": "A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.", + "description": "A datafeed that is stopped ceases to retrieve data from Elasticsearch. 
A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-stop-datafeed", "parameters": [ { @@ -28330,6 +28366,7 @@ "ml trained model" ], "summary": "Stop a trained model deployment", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-stop-trained-model-deployment", "parameters": [ { @@ -28393,6 +28430,7 @@ "ml data frame" ], "summary": "Update a data frame analytics job", + "description": "\n ##Required authorization\n* Index privileges: `read`,`create_index`,`manage`,`index`,`view_index_metadata`* Cluster privileges: `manage_ml`", "operationId": "ml-update-data-frame-analytics", "parameters": [ { @@ -28505,7 +28543,7 @@ "ml anomaly" ], "summary": "Update a datafeed", - "description": "You must stop and start the datafeed for the changes to be applied.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\nthe time of the update and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.", + "description": "You must stop and start the datafeed for the changes to be applied.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\nthe time of the update and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-datafeed", "parameters": [ { @@ -28711,7 +28749,7 @@ "ml anomaly" ], "summary": "Update a filter", - "description": "Updates the description of a filter, adds items, or removes items from the list.", + "description": "Updates the description of a filter, adds items, or removes items from the list.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-filter", "parameters": [ { @@ -28796,7 +28834,7 @@ "ml anomaly" ], "summary": "Update an anomaly detection job", - "description": "Updates certain properties of an anomaly detection job.", + "description": "Updates certain properties of an anomaly detection job.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-job", "parameters": [ { @@ -28994,7 +29032,7 @@ "ml anomaly" ], "summary": "Update a snapshot", - "description": "Updates certain properties of a snapshot.", + "description": "Updates certain properties of a snapshot.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-model-snapshot", "parameters": [ { @@ -29073,6 +29111,7 @@ "ml trained model" ], "summary": "Update a trained model deployment", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-trained-model-deployment", "parameters": [ { @@ -29144,7 +29183,7 @@ "ml anomaly" ], "summary": "Upgrade a snapshot", - "description": "Upgrade an anomaly detection model snapshot to the latest major version.\nOver time, older snapshot formats are deprecated and removed. 
Anomaly\ndetection jobs support only snapshots that are from the current or previous\nmajor version.\nThis API provides a means to upgrade a snapshot to the current major version.\nThis aids in preparing the cluster for an upgrade to the next major version.\nOnly one snapshot per anomaly detection job can be upgraded at a time and the\nupgraded snapshot cannot be the current snapshot of the anomaly detection\njob.", + "description": "Upgrade an anomaly detection model snapshot to the latest major version.\nOver time, older snapshot formats are deprecated and removed. Anomaly\ndetection jobs support only snapshots that are from the current or previous\nmajor version.\nThis API provides a means to upgrade a snapshot to the current major version.\nThis aids in preparing the cluster for an upgrade to the next major version.\nOnly one snapshot per anomaly detection job can be upgraded at a time and the\nupgraded snapshot cannot be the current snapshot of the anomaly detection\njob.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-upgrade-job-snapshot", "parameters": [ { @@ -29224,7 +29263,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch", "parameters": [ { @@ -29282,7 +29321,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character 
`\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch-1", "parameters": [ { @@ -29342,7 +29381,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch-2", "parameters": [ { @@ -29403,7 +29442,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch-3", "parameters": [ { @@ -29466,7 +29505,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", 
\"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -29509,7 +29548,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -29554,7 +29593,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ 
\"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -29600,7 +29639,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -29648,7 +29687,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors", "parameters": [ { @@ -29709,7 +29748,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request 
body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors-1", "parameters": [ { @@ -29772,7 +29811,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors-2", "parameters": [ { @@ -29836,7 +29875,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate 
term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors-3", "parameters": [ { @@ -29902,7 +29941,7 @@ "cluster" ], "summary": "Clear the archived repositories metering", - "description": "Clear the archived repositories metering information in the cluster.", + "description": "Clear the archived repositories metering information in the cluster.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-clear-repositories-metering-archive", "parameters": [ { @@ -29949,7 +29988,7 @@ "cluster" ], "summary": "Get cluster repositories metering", - "description": "Get repositories metering information for a cluster.\nThis API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time.\nAdditionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.", + "description": "Get repositories metering information for a cluster.\nThis API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time.\nAdditionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-get-repositories-metering-info", "parameters": [ { @@ -29985,7 +30024,7 @@ "cluster" ], "summary": "Get the hot threads for nodes", - "description": "Get a breakdown of the hot threads on each selected node in the cluster.\nThe output is plain text with a breakdown of the top hot threads for each node.", + "description": "Get a breakdown of the hot threads on each selected node in the cluster.\nThe output is plain text with a breakdown of the top hot threads for each node.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-hot-threads", "parameters": [ { @@ -30024,7 +30063,7 @@ "cluster" ], "summary": "Get the hot threads for nodes", - "description": "Get a breakdown of the hot threads on each selected node in the cluster.\nThe output is plain text with a breakdown of the top hot threads for each node.", + "description": "Get a breakdown of the hot threads on each selected node in the cluster.\nThe output is plain text with a breakdown of the top hot threads for each node.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-hot-threads-1", "parameters": [ { @@ -30237,7 +30276,7 @@ "cluster" ], "summary": "Get node statistics", - "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.", + "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-stats", "parameters": [ { @@ -30282,7 +30321,7 @@ "cluster" ], "summary": "Get node statistics", - "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.", + "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. 
You can limit the returned information by using metrics.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-stats-1", "parameters": [ { @@ -30330,7 +30369,7 @@ "cluster" ], "summary": "Get node statistics", - "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.", + "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-stats-2", "parameters": [ { @@ -30378,7 +30417,7 @@ "cluster" ], "summary": "Get node statistics", - "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.", + "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-stats-3", "parameters": [ { @@ -30429,7 +30468,7 @@ "cluster" ], "summary": "Get node statistics", - "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.", + "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-stats-4", "parameters": [ { @@ -30480,7 +30519,7 @@ "cluster" ], "summary": "Get node statistics", - "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.", + "description": "Get statistics for nodes in a cluster.\nBy default, all stats are returned. You can limit the returned information by using metrics.\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-stats-5", "parameters": [ { @@ -30534,6 +30573,7 @@ "cluster" ], "summary": "Get feature usage information", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-usage", "parameters": [ { @@ -30554,6 +30594,7 @@ "cluster" ], "summary": "Get feature usage information", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-usage-1", "parameters": [ { @@ -30577,6 +30618,7 @@ "cluster" ], "summary": "Get feature usage information", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-usage-2", "parameters": [ { @@ -30600,6 +30642,7 @@ "cluster" ], "summary": "Get feature usage information", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor`,`manage`", "operationId": "nodes-usage-3", "parameters": [ { @@ -30626,7 +30669,7 @@ "search" ], "summary": "Open a point in time", - "description": "A search request by default runs against the most recent visible data of the target indices,\nwhich is called point in time. Elasticsearch pit (point in time) is a lightweight view into the\nstate of the data as it existed when initiated. In some cases, it’s preferred to perform multiple\nsearch requests using the same point in time. 
For example, if refreshes happen between\n`search_after` requests, then the results of those requests might not be consistent as changes happening\nbetween searches are only visible to the more recent point in time.\n\nA point in time must be opened explicitly before being used in search requests.\n\nA subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.\n\nJust like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.\nIf you want to retrieve more hits, use PIT with `search_after`.\n\nIMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.\n\nWhen a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception.\nTo get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.\n\n**Keeping point in time alive**\n\nThe `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time.\nThe value does not need to be long enough to process all data — it just needs to be long enough for the next request.\n\nNormally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.\nOnce the smaller segments are no longer needed they are deleted.\nHowever, open point-in-times prevent the old segments from being deleted since they are still in use.\n\nTIP: Keeping older segments alive means that more disk space and file handles are needed.\nEnsure that you have configured your nodes to have ample free file handles.\n\nAdditionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.\nEnsure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.\nNote that a point-in-time doesn't prevent its associated indices from being deleted.\nYou can check how many point-in-times (that is, search contexts) are open with the nodes stats API.", + "description": "A search request by default runs against the most recent visible data of the target indices,\nwhich is called point in time. Elasticsearch pit (point in time) is a lightweight view into the\nstate of the data as it existed when initiated. In some cases, it’s preferred to perform multiple\nsearch requests using the same point in time. 
For example, if refreshes happen between\n`search_after` requests, then the results of those requests might not be consistent as changes happening\nbetween searches are only visible to the more recent point in time.\n\nA point in time must be opened explicitly before being used in search requests.\n\nA subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.\n\nJust like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.\nIf you want to retrieve more hits, use PIT with `search_after`.\n\nIMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.\n\nWhen a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception.\nTo get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.\n\n**Keeping point in time alive**\n\nThe `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time.\nThe value does not need to be long enough to process all data — it just needs to be long enough for the next request.\n\nNormally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.\nOnce the smaller segments are no longer needed they are deleted.\nHowever, open point-in-times prevent the old segments from being deleted since they are still in use.\n\nTIP: Keeping older segments alive means that more disk space and file handles are needed.\nEnsure that you have configured your nodes to have ample free file handles.\n\nAdditionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.\nEnsure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.\nNote that a point-in-time doesn't prevent its associated indices from being deleted.\nYou can check how many point-in-times (that is, search contexts) are open with the nodes stats API.\n ##Required authorization\n* Index privileges: `read`", "operationId": "open-point-in-time", "parameters": [ { @@ -30771,7 +30814,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -30814,7 +30857,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -30859,7 +30902,7 @@ "query_rules" ], "summary": "Get a query rule", - "description": "Get details about a query rule within a 
query ruleset.", + "description": "Get details about a query rule within a query ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/searching-with-query-rules" }, @@ -30919,7 +30962,7 @@ "query_rules" ], "summary": "Create or update a query rule", - "description": "Create or update a query rule within a query ruleset.\n\nIMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.", + "description": "Create or update a query rule within a query ruleset.\n\nIMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-put-rule", "parameters": [ { @@ -31024,7 +31067,7 @@ "query_rules" ], "summary": "Delete a query rule", - "description": "Delete a query rule within a query ruleset.\nThis is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.", + "description": "Delete a query rule within a query ruleset.\nThis is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-delete-rule", "parameters": [ { @@ -31071,7 +31114,7 @@ "query_rules" ], "summary": "Get a query ruleset", - "description": "Get details about a query ruleset.", + "description": "Get details about a query ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-get-ruleset", "parameters": [ { @@ -31117,7 +31160,7 @@ "query_rules" ], "summary": "Create or update a query ruleset", - "description": "There is a limit of 100 rules per ruleset.\nThis limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.\n\nIMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.", + "description": "There is a limit of 100 rules per ruleset.\nThis limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.\n\nIMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a 
maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/searching-with-query-rules" }, @@ -31202,7 +31245,7 @@ "query_rules" ], "summary": "Delete a query ruleset", - "description": "Remove a query ruleset and its associated data.\nThis is a destructive action that is not recoverable.", + "description": "Remove a query ruleset and its associated data.\nThis is a destructive action that is not recoverable.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-delete-ruleset", "parameters": [ { @@ -31238,7 +31281,7 @@ "query_rules" ], "summary": "Get all query rulesets", - "description": "Get summarized information about the query rulesets.", + "description": "Get summarized information about the query rulesets.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-list-rulesets", "parameters": [ { @@ -31310,7 +31353,7 @@ "query_rules" ], "summary": "Test a query ruleset", - "description": "Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.", + "description": "Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-test", "parameters": [ { @@ -31401,7 +31444,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval", "parameters": [ { @@ -31432,7 +31475,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval-1", "parameters": [ { @@ -31465,7 +31508,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval-2", "parameters": [ { @@ -31499,7 +31542,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval-3", "parameters": [ { @@ -31535,7 +31578,7 @@ "document" ], "summary": "Reindex documents", - "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the 
source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using 
a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security 
privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n 
\"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
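For illustration only (this is editorial, not part of the generated description), a minimal sketch of the `ctx.op` mechanism described above, assuming hypothetical `my-source` and `my-dest` indices and a hypothetical `status` field: a reindex request whose script skips archived documents by setting `ctx.op` to `noop`:

```
POST _reindex
{
  "source": {
    "index": "my-source"
  },
  "dest": {
    "index": "my-dest"
  },
  "script": {
    "source": "if (ctx._source.status == 'archived') { ctx.op = 'noop' }"
  }
}
```

Documents skipped this way show up in the `noop` counter of the reindex response, as noted above.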
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.\n ##Required authorization\n* Index privileges: `read`,`write`", "operationId": "reindex", "parameters": [ { @@ -31868,7 +31911,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index privileges: `read`", "operationId": "render-search-template", "requestBody": { "$ref": "#/components/requestBodies/render_search_template" @@ -31891,7 +31934,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index 
privileges: `read`", "operationId": "render-search-template-1", "requestBody": { "$ref": "#/components/requestBodies/render_search_template" @@ -31916,7 +31959,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index privileges: `read`", "operationId": "render-search-template-2", "parameters": [ { @@ -31944,7 +31987,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index privileges: `read`", "operationId": "render-search-template-3", "parameters": [ { @@ -31974,7 +32017,7 @@ "rollup" ], "summary": "Get rollup job information", - "description": "Get the configuration, stats, and status of rollup jobs.\n\nNOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.\nIf a job was created, ran for a while, then was deleted, the API does not return any details about it.\nFor details about a historical rollup job, the rollup capabilities API may be more useful.", + "description": "Get the configuration, stats, and status of rollup jobs.\n\nNOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.\nIf a job was created, ran for a while, then was deleted, the API does not return any details about it.\nFor details about a historical rollup job, the rollup capabilities API may be more useful.\n ##Required authorization\n* Cluster privileges: `monitor_rollup`", "operationId": "rollup-get-jobs", "parameters": [ { @@ -32000,7 +32043,7 @@ "rollup" ], "summary": "Create a rollup job", - "description": "WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.\n\nThe rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.\n\nThere are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.\n\nJobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.", + "description": "WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.\n\nThe rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.\n\nThere are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.\n\nJobs are created in a `STOPPED` state. 
You can start them with the start rollup jobs API.\n ##Required authorization\n* Cluster privileges: `manage`,`manage_rollup`", "operationId": "rollup-put-job", "parameters": [ { @@ -32102,7 +32145,7 @@ "rollup" ], "summary": "Delete a rollup job", - "description": "A job must be stopped before it can be deleted.\nIf you attempt to delete a started job, an error occurs.\nSimilarly, if you attempt to delete a nonexistent job, an exception occurs.\n\nIMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data.\nThe API does not delete any previously rolled up data.\nThis is by design; a user may wish to roll up a static data set.\nBecause the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data).\nThus the job can be deleted, leaving behind the rolled up data for analysis.\nIf you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index.\nIf the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example:\n\n```\nPOST my_rollup_index/_delete_by_query\n{\n \"query\": {\n \"term\": {\n \"_rollup.id\": \"the_rollup_job_id\"\n }\n }\n}\n```", + "description": "A job must be stopped before it can be deleted.\nIf you attempt to delete a started job, an error occurs.\nSimilarly, if you attempt to delete a nonexistent job, an exception occurs.\n\nIMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data.\nThe API does not delete any previously rolled up data.\nThis is by design; a user may wish to roll up a static data set.\nBecause the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data).\nThus the job can be deleted, leaving behind the rolled up data for analysis.\nIf you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index.\nIf the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. 
For example:\n\n```\nPOST my_rollup_index/_delete_by_query\n{\n \"query\": {\n \"term\": {\n \"_rollup.id\": \"the_rollup_job_id\"\n }\n }\n}\n```\n ##Required authorization\n* Cluster privileges: `manage_rollup`", "operationId": "rollup-delete-job", "parameters": [ { @@ -32165,7 +32208,7 @@ "rollup" ], "summary": "Get rollup job information", - "description": "Get the configuration, stats, and status of rollup jobs.\n\nNOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.\nIf a job was created, ran for a while, then was deleted, the API does not return any details about it.\nFor details about a historical rollup job, the rollup capabilities API may be more useful.", + "description": "Get the configuration, stats, and status of rollup jobs.\n\nNOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs.\nIf a job was created, ran for a while, then was deleted, the API does not return any details about it.\nFor details about a historical rollup job, the rollup capabilities API may be more useful.\n ##Required authorization\n* Cluster privileges: `monitor_rollup`", "operationId": "rollup-get-jobs-1", "responses": { "200": { @@ -32188,7 +32231,7 @@ "rollup" ], "summary": "Get the rollup job capabilities", - "description": "Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.\n\nThis API is useful because a rollup job is often configured to rollup only a subset of fields from the source index.\nFurthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration.\nThis API enables you to inspect an index and determine:\n\n1. Does this index have associated rollup data somewhere in the cluster?\n2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?", + "description": "Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.\n\nThis API is useful because a rollup job is often configured to rollup only a subset of fields from the source index.\nFurthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration.\nThis API enables you to inspect an index and determine:\n\n1. Does this index have associated rollup data somewhere in the cluster?\n2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?\n ##Required authorization\n* Cluster privileges: `monitor_rollup`", "operationId": "rollup-get-rollup-caps", "parameters": [ { @@ -32216,7 +32259,7 @@ "rollup" ], "summary": "Get the rollup job capabilities", - "description": "Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.\n\nThis API is useful because a rollup job is often configured to rollup only a subset of fields from the source index.\nFurthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration.\nThis API enables you to inspect an index and determine:\n\n1. Does this index have associated rollup data somewhere in the cluster?\n2. 
If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?", + "description": "Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.\n\nThis API is useful because a rollup job is often configured to rollup only a subset of fields from the source index.\nFurthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration.\nThis API enables you to inspect an index and determine:\n\n1. Does this index have associated rollup data somewhere in the cluster?\n2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?\n ##Required authorization\n* Cluster privileges: `monitor_rollup`", "operationId": "rollup-get-rollup-caps-1", "responses": { "200": { @@ -32239,7 +32282,7 @@ "rollup" ], "summary": "Get the rollup index capabilities", - "description": "Get the rollup capabilities of all jobs inside of a rollup index.\nA single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:\n\n* What jobs are stored in an index (or indices specified via a pattern)?\n* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?", + "description": "Get the rollup capabilities of all jobs inside of a rollup index.\nA single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:\n\n* What jobs are stored in an index (or indices specified via a pattern)?\n* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?\n ##Required authorization\n* Index privileges: `read`", "operationId": "rollup-get-rollup-index-caps", "parameters": [ { @@ -32363,7 +32406,7 @@ "rollup" ], "summary": "Start rollup jobs", - "description": "If you try to start a job that does not exist, an exception occurs.\nIf you try to start a job that is already started, nothing happens.", + "description": "If you try to start a job that does not exist, an exception occurs.\nIf you try to start a job that is already started, nothing happens.\n ##Required authorization\n* Cluster privileges: `manage_rollup`", "operationId": "rollup-start-job", "parameters": [ { @@ -32420,7 +32463,7 @@ "rollup" ], "summary": "Stop rollup jobs", - "description": "If you try to stop a job that does not exist, an exception occurs.\nIf you try to stop a job that is already stopped, nothing happens.\n\nSince only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped.\nThis is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. 
For example:\n\n```\nPOST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s\n```\nThe parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed.\nIf the specified time elapses without the job moving to STOPPED, a timeout exception occurs.", + "description": "If you try to stop a job that does not exist, an exception occurs.\nIf you try to stop a job that is already stopped, nothing happens.\n\nSince only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped.\nThis is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example:\n\n```\nPOST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s\n```\nThe parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed.\nIf the specified time elapses without the job moving to STOPPED, a timeout exception occurs.\n ##Required authorization\n* Cluster privileges: `manage_rollup`", "operationId": "rollup-stop-job", "parameters": [ { @@ -32533,7 +32576,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -32690,7 +32733,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -32849,7 +32892,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -33009,7 +33052,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -33171,6 +33214,7 @@ "search_application" ], "summary": "Get search application details", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_search_application`", "operationId": "search-application-get", "parameters": [ { @@ -33216,6 +33260,7 @@ "search_application" ], "summary": "Create or update a search application", + "description": "\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage_search_application`", "operationId": "search-application-put", "parameters": [ { @@ -33289,7 +33334,7 @@ "search_application" ], "summary": "Delete a search application", - "description": "Remove a search application and its associated alias. Indices attached to the search application are not removed.", + "description": "Remove a search application and its associated alias. Indices attached to the search application are not removed.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage_search_application`", "operationId": "search-application-delete", "parameters": [ { @@ -33443,7 +33488,7 @@ "search_application" ], "summary": "Get search applications", - "description": "Get information about search applications.", + "description": "Get information about search applications.\n ##Required authorization\n* Cluster privileges: `manage_search_application`", "operationId": "search-application-list", "parameters": [ { @@ -33753,7 +33798,7 @@ "search" ], "summary": "Search a vector tile", - "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. 
The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", + "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. 
The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://github.com/mapbox/vector-tile-spec/blob/master/README.md" }, @@ -33817,7 +33862,7 @@ "search" ], "summary": "Search a vector tile", - "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector 
tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed as follows: `<zoom> + grid_precision`.\nFor example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", + "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary Mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the `<field>`. 
The query uses the `<zoom>/<x>/<y>` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed as follows: `<zoom> + grid_precision`.\nFor example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://github.com/mapbox/vector-tile-spec/blob/master/README.md" }, @@ -33883,7 +33928,7 @@ "search" ], "summary": "Get the search shards", - "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard 
preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.", + "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "search-shards", "parameters": [ { @@ -33926,7 +33971,7 @@ "search" ], "summary": "Get the search shards", - "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.", + "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "search-shards-1", "parameters": [ { @@ -33971,7 +34016,7 @@ "search" ], "summary": "Get the search shards", - "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.", + "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "search-shards-2", "parameters": [ { @@ -34017,7 +34062,7 @@ "search" ], "summary": "Get the search shards", - "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the 
`view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.", + "description": "Get the indices and shards that a search request would be run against.\nThis information can be useful for working out issues or planning optimizations with routing and shard preferences.\nWhen filtered aliases are used, the filter is returned as part of the `indices` section.\n\nIf the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "search-shards-3", "parameters": [ { @@ -34065,6 +34110,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -34131,6 +34177,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -34199,6 +34246,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -34268,6 +34316,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -34339,7 +34388,7 @@ "searchable_snapshots" ], "summary": "Get cache statistics", - "description": "Get statistics about the shared cache for partially mounted indices.", + "description": "Get statistics about the shared cache for partially mounted indices.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/searchable-snapshots" }, @@ -34369,7 +34418,7 @@ "searchable_snapshots" ], "summary": "Get cache statistics", - "description": "Get statistics about the shared cache for partially mounted indices.", + "description": "Get statistics about the shared cache for partially mounted indices.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/searchable-snapshots" }, @@ -34402,7 +34451,7 @@ "searchable_snapshots" ], "summary": "Clear the cache", - "description": "Clear indices and data streams from the shared cache for partially mounted indices.", + "description": "Clear indices and data streams from the shared cache for partially mounted indices.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/searchable-snapshots" }, @@ -34432,7 +34481,7 @@ "searchable_snapshots" ], "summary": "Clear the cache", - "description": "Clear indices and data streams from the shared cache for partially mounted indices.", + "description": "Clear indices and data streams from the shared cache for partially mounted indices.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "externalDocs": { "url": 
"https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/searchable-snapshots" }, @@ -34465,7 +34514,7 @@ "searchable_snapshots" ], "summary": "Mount a snapshot", - "description": "Mount a snapshot as a searchable snapshot index.\nDo not use this API for snapshots managed by index lifecycle management (ILM).\nManually mounting ILM-managed snapshots can interfere with ILM processes.", + "description": "Mount a snapshot as a searchable snapshot index.\nDo not use this API for snapshots managed by index lifecycle management (ILM).\nManually mounting ILM-managed snapshots can interfere with ILM processes.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "operationId": "searchable-snapshots-mount", "parameters": [ { @@ -34597,6 +34646,7 @@ "searchable_snapshots" ], "summary": "Get searchable snapshot statistics", + "description": "\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "operationId": "searchable-snapshots-stats", "parameters": [ { @@ -34617,6 +34667,7 @@ "searchable_snapshots" ], "summary": "Get searchable snapshot statistics", + "description": "\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage`", "operationId": "searchable-snapshots-stats-1", "parameters": [ { @@ -34640,7 +34691,7 @@ "security" ], "summary": "Activate a user profile", - "description": "Create or update a user profile on behalf of another user.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nThe calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nThis API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm.\nFor example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token.\n\nWhen updating a profile document, the API enables the document if it was disabled.\nAny updates do not change existing content for either the `labels` or `data` fields.", + "description": "Create or update a user profile on behalf of another user.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nThe calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nThis API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm.\nFor example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token.\n\nWhen updating 
a profile document, the API enables the document if it was disabled.\nAny updates do not change existing content for either the `labels` or `data` fields.\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-activate-user-profile", "requestBody": { "content": { @@ -34809,7 +34860,7 @@ "security" ], "summary": "Get roles", - "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.", + "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-role-1", "responses": { "200": { @@ -34829,7 +34880,7 @@ "security" ], "summary": "Bulk create or update roles", - "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe bulk create or update roles API cannot update roles that are defined in roles files.", + "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe bulk create or update roles API cannot update roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-bulk-put-role", "parameters": [ { @@ -34945,7 +34996,7 @@ "security" ], "summary": "Bulk delete roles", - "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe bulk delete roles API cannot delete roles that are defined in roles files.", + "description": "The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe bulk delete roles API cannot delete roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-bulk-delete-role", "parameters": [ { @@ -35051,7 +35102,7 @@ "security" ], "summary": "Bulk update API keys", - "description": "Update the attributes for multiple API keys.\n\nIMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.\n\nThis API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.\n\nIt is not possible to update expired or invalidated API keys.\n\nThis API supports updates to API key access scope, metadata and expiration.\nThe access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. 
This change can occur if the owner user's permissions have changed since the API key was created or last modified.\n\nA successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.", + "description": "Update the attributes for multiple API keys.\n\nIMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.\n\nThis API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.\n\nIt is not possible to update expired or invalidated API keys.\n\nThis API supports updates to API key access scope, metadata and expiration.\nThe access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.\n\nA successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "operationId": "security-bulk-update-api-keys", "requestBody": { "content": { @@ -35275,7 +35326,7 @@ "security" ], "summary": "Clear the API key cache", - "description": "Evict a subset of all entries from the API key cache.\nThe cache is also automatically cleared on state changes of the security index.", + "description": "Evict a subset of all entries from the API key cache.\nThe cache is also automatically cleared on state changes of the security index.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-clear-api-key-cache", "parameters": [ { @@ -35330,7 +35381,7 @@ "security" ], "summary": "Clear the privileges cache", - "description": "Evict privileges from the native application privilege cache.\nThe cache is also automatically cleared for applications that have their privileges updated.", + "description": "Evict privileges from the native application privilege cache.\nThe cache is also automatically cleared for applications that have their privileges updated.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-clear-cached-privileges", "parameters": [ { @@ -35456,7 +35507,7 @@ "security" ], "summary": "Clear the roles cache", - "description": "Evict roles from the native role cache.", + "description": "Evict roles from the native role cache.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-clear-cached-roles", "parameters": [ { @@ -35511,7 +35562,7 @@ "security" ], "summary": "Clear service account token caches", - "description": "Evict a subset of all entries from the service account token caches.\nTwo separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` 
index.\nThis API clears matching entries from both caches.\n\nThe cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index.\nThe cache for tokens backed by the `service_tokens` file is cleared automatically on file changes.", + "description": "Evict a subset of all entries from the service account token caches.\nTwo separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index.\nThis API clears matching entries from both caches.\n\nThe cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index.\nThe cache for tokens backed by the `service_tokens` file is cleared automatically on file changes.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -35591,7 +35642,7 @@ "security" ], "summary": "Get API key information", - "description": "Retrieves information for one or more API keys.\nNOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Retrieves information for one or more API keys.\nNOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`,`read_security`", "operationId": "security-get-api-key", "parameters": [ { @@ -35723,7 +35774,7 @@ "security" ], "summary": "Create an API key", - "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.", + "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. 
You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/security-settings#api-key-service-settings" }, @@ -35754,7 +35805,7 @@ "security" ], "summary": "Create an API key", - "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.", + "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. 
You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/security-settings#api-key-service-settings" }, @@ -35785,7 +35836,7 @@ "security" ], "summary": "Invalidate API keys", - "description": "This API invalidates API keys created by the create API key or grant API key APIs.\nInvalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.\n\nTo use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges.\nThe `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys.\nThe `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys.\nThe `manage_own_api_key` only allows deleting REST API keys that are owned by the user.\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\n\n- Set the parameter `owner=true`.\n- Or, set both `username` and `realm_name` to match the user's identity.\n- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.", + "description": "This API invalidates API keys created by the create API key or grant API key APIs.\nInvalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.\n\nTo use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges.\nThe `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys.\nThe `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys.\nThe `manage_own_api_key` only allows deleting REST API keys that are owned by the user.\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\n\n- Set the parameter `owner=true`.\n- Or, set both `username` and `realm_name` to match the user's identity.\n- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.\n ##Required authorization\n* Cluster privileges: `manage_api_key`,`manage_own_api_key`", "operationId": "security-invalidate-api-key", "requestBody": { "content": { @@ -35920,7 +35971,7 @@ "security" ], "summary": "Create a cross-cluster API key", - "description": "Create an API key of the `cross_cluster` type for the API key based remote cluster access.\nA `cross_cluster` API key cannot be used to authenticate through the REST interface.\n\nIMPORTANT: To authenticate this request you must use a credential that is not an API key. 
Even if you use an API key that has the required privilege, the API returns an error.\n\nCross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.\n\nNOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property.\n\nA successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. You can specify expiration information when you create the API keys.\n\nCross-cluster API keys can only be updated with the update cross-cluster API key API.\nAttempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.", + "description": "Create an API key of the `cross_cluster` type for the API key based remote cluster access.\nA `cross_cluster` API key cannot be used to authenticate through the REST interface.\n\nIMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.\n\nCross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.\n\nNOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property.\n\nA successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. 
You can specify expiration information when you create the API keys.\n\nCross-cluster API keys can only be updated with the update cross-cluster API key API.\nAttempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-api-key" }, @@ -36017,7 +36068,7 @@ "security" ], "summary": "Create a service account token", - "description": "Create a service accounts token for access without requiring basic authentication.\n\nNOTE: Service account tokens never expire.\nYou must actively delete them if they are no longer needed.", + "description": "Create a service accounts token for access without requiring basic authentication.\n\nNOTE: Service account tokens never expire.\nYou must actively delete them if they are no longer needed.\n ##Required authorization\n* Cluster privileges: `manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -36054,7 +36105,7 @@ "security" ], "summary": "Create a service account token", - "description": "Create a service accounts token for access without requiring basic authentication.\n\nNOTE: Service account tokens never expire.\nYou must actively delete them if they are no longer needed.", + "description": "Create a service accounts token for access without requiring basic authentication.\n\nNOTE: Service account tokens never expire.\nYou must actively delete them if they are no longer needed.\n ##Required authorization\n* Cluster privileges: `manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -36091,7 +36142,7 @@ "security" ], "summary": "Delete service account tokens", - "description": "Delete service account tokens for a service in a specified namespace.", + "description": "Delete service account tokens for a service in a specified namespace.\n ##Required authorization\n* Cluster privileges: `manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -36183,7 +36234,7 @@ "security" ], "summary": "Create a service account token", - "description": "Create a service accounts token for access without requiring basic authentication.\n\nNOTE: Service account tokens never expire.\nYou must actively delete them if they are no longer needed.", + "description": "Create a service accounts token for access without requiring basic authentication.\n\nNOTE: Service account tokens never expire.\nYou must actively delete them if they are no longer needed.\n ##Required authorization\n* Cluster privileges: `manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -36219,7 +36270,7 @@ "security" ], "summary": "Delegate PKI authentication", - "description": "This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.\nThe certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.\nA successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw 
`username_pattern` of the respective realm.\n\nThis API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm—-​as if the user connected directly to Elasticsearch.\n\nIMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.\nThis is part of the TLS authentication process and it is delegated to the proxy that calls this API.\nThe proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.", + "description": "This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.\nThe certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.\nA successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw `username_pattern` of the respective realm.\n\nThis API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm—-​as if the user connected directly to Elasticsearch.\n\nIMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.\nThis is part of the TLS authentication process and it is delegated to the proxy that calls this API.\nThe proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.\n ##Required authorization\n* Cluster privileges: `all`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/pki" }, @@ -36301,7 +36352,7 @@ "security" ], "summary": "Get application privileges", - "description": "To use this API, you must have one of the following privileges:\n\n* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.", + "description": "To use this API, you must have one of the following privileges:\n\n* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n ##Required authorization\n* Cluster privileges: `read_security`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -36332,7 +36383,7 @@ "security" ], "summary": "Delete application privileges", - "description": "To use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.", + "description": "To use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": 
"https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -36410,7 +36461,7 @@ "security" ], "summary": "Get roles", - "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.", + "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-role", "parameters": [ { @@ -36435,7 +36486,7 @@ "security" ], "summary": "Create or update roles", - "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.", + "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles" }, @@ -36469,7 +36520,7 @@ "security" ], "summary": "Create or update roles", - "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.", + "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles" }, @@ -36503,7 +36554,7 @@ "security" ], "summary": "Delete roles", - "description": "Delete roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe delete roles API cannot remove roles that are defined in roles files.", + "description": "Delete roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe delete roles API cannot remove roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-delete-role", "parameters": [ { @@ -36570,7 +36621,7 @@ "security" ], "summary": "Get role mappings", - "description": "Role mappings define which roles are assigned to each user.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.\nThe get role mappings API cannot retrieve role mappings 
that are defined in role mapping files.", + "description": "Role mappings define which roles are assigned to each user.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.\nThe get role mappings API cannot retrieve role mappings that are defined in role mapping files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles" }, @@ -36598,7 +36649,7 @@ "security" ], "summary": "Create or update role mappings", - "description": "Role mappings define which roles are assigned to each user.\nEach mapping has rules that identify users and a list of roles that are granted to those users.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.\n\nNOTE: This API does not create roles. Rather, it maps users to existing roles.\nRoles can be created by using the create or update roles API or roles files.\n\n**Role templates**\n\nThe most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.\nFor example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.\nThe `roles` field is used for this purpose.\n\nFor more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.\nThe `role_templates` field is used for this purpose.\n\nNOTE: To use role templates successfully, the relevant scripting feature must be enabled.\nOtherwise, all attempts to create a role mapping with role templates fail.\n\nAll of the user fields that are available in the role mapping rules are also available in the role templates.\nThus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.\n\nBy default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.\nIf the format of the template is set to \"json\" then the template is expected to produce a JSON string or an array of JSON strings for the role names.", + "description": "Role mappings define which roles are assigned to each user.\nEach mapping has rules that identify users and a list of roles that are granted to those users.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.\n\nNOTE: This API does not create roles. 
Rather, it maps users to existing roles.\nRoles can be created by using the create or update roles API or roles files.\n\n**Role templates**\n\nThe most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.\nFor example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.\nThe `roles` field is used for this purpose.\n\nFor more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.\nThe `role_templates` field is used for this purpose.\n\nNOTE: To use role templates successfully, the relevant scripting feature must be enabled.\nOtherwise, all attempts to create a role mapping with role templates fail.\n\nAll of the user fields that are available in the role mapping rules are also available in the role templates.\nThus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.\n\nBy default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.\nIf the format of the template is set to \"json\" then the template is expected to produce a JSON string or an array of JSON strings for the role names.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles" }, @@ -36632,7 +36683,7 @@ "security" ], "summary": "Create or update role mappings", - "description": "Role mappings define which roles are assigned to each user.\nEach mapping has rules that identify users and a list of roles that are granted to those users.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.\n\nNOTE: This API does not create roles. 
Rather, it maps users to existing roles.\nRoles can be created by using the create or update roles API or roles files.\n\n**Role templates**\n\nThe most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.\nFor example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.\nThe `roles` field is used for this purpose.\n\nFor more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.\nThe `role_templates` field is used for this purpose.\n\nNOTE: To use role templates successfully, the relevant scripting feature must be enabled.\nOtherwise, all attempts to create a role mapping with role templates fail.\n\nAll of the user fields that are available in the role mapping rules are also available in the role templates.\nThus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.\n\nBy default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.\nIf the format of the template is set to \"json\" then the template is expected to produce a JSON string or an array of JSON strings for the role names.", + "description": "Role mappings define which roles are assigned to each user.\nEach mapping has rules that identify users and a list of roles that are granted to those users.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.\n\nNOTE: This API does not create roles. 
Rather, it maps users to existing roles.\nRoles can be created by using the create or update roles API or roles files.\n\n**Role templates**\n\nThe most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.\nFor example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.\nThe `roles` field is used for this purpose.\n\nFor more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.\nThe `role_templates` field is used for this purpose.\n\nNOTE: To use role templates successfully, the relevant scripting feature must be enabled.\nOtherwise, all attempts to create a role mapping with role templates fail.\n\nAll of the user fields that are available in the role mapping rules are also available in the role templates.\nThus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.\n\nBy default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user.\nIf the format of the template is set to \"json\" then the template is expected to produce a JSON string or an array of JSON strings for the role names.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles" }, @@ -36666,7 +36717,7 @@ "security" ], "summary": "Delete role mappings", - "description": "Role mappings define which roles are assigned to each user.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.\nThe delete role mappings API cannot remove role mappings that are defined in role mapping files.", + "description": "Role mappings define which roles are assigned to each user.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.\nThe delete role mappings API cannot remove role mappings that are defined in role mapping files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles" }, @@ -36736,7 +36787,7 @@ "security" ], "summary": "Get users", - "description": "Get information about users in the native realm and built-in users.", + "description": "Get information about users in the native realm and built-in users.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-user", "parameters": [ { @@ -36764,7 +36815,7 @@ "security" ], "summary": "Create or update users", - "description": "Add and update users in the native realm.\nA password is required for adding a new user but is optional when updating an existing user.\nTo change a user's password without updating any other fields, use the change password API.", + "description": "Add and update users in the native realm.\nA password is required for adding a new user but is optional when updating an existing user.\nTo change a user's password without updating any other fields, use the change password API.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-put-user", "parameters": [ { @@ -36795,7 +36846,7 @@ "security" ], "summary": "Create or update 
users", - "description": "Add and update users in the native realm.\nA password is required for adding a new user but is optional when updating an existing user.\nTo change a user's password without updating any other fields, use the change password API.", + "description": "Add and update users in the native realm.\nA password is required for adding a new user but is optional when updating an existing user.\nTo change a user's password without updating any other fields, use the change password API.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-put-user-1", "parameters": [ { @@ -36826,7 +36877,7 @@ "security" ], "summary": "Delete users", - "description": "Delete users from the native realm.", + "description": "Delete users from the native realm.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-delete-user", "parameters": [ { @@ -36893,7 +36944,7 @@ "security" ], "summary": "Disable users", - "description": "Disable users in the native realm.\nBy default, when you create users, they are enabled.\nYou can use this API to revoke a user's access to Elasticsearch.", + "description": "Disable users in the native realm.\nBy default, when you create users, they are enabled.\nYou can use this API to revoke a user's access to Elasticsearch.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-disable-user", "parameters": [ { @@ -36915,7 +36966,7 @@ "security" ], "summary": "Disable users", - "description": "Disable users in the native realm.\nBy default, when you create users, they are enabled.\nYou can use this API to revoke a user's access to Elasticsearch.", + "description": "Disable users in the native realm.\nBy default, when you create users, they are enabled.\nYou can use this API to revoke a user's access to Elasticsearch.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-disable-user-1", "parameters": [ { @@ -36939,7 +36990,7 @@ "security" ], "summary": "Disable a user profile", - "description": "Disable user profiles so that they are not visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, its automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches.\nTo re-enable a disabled user profile, use the enable user profile API .", + "description": "Disable user profiles so that they are not visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, its automatically enabled and visible in user profile searches. 
You can use the disable user profile API to disable a user profile so it’s not visible in these searches.\nTo re-enable a disabled user profile, use the enable user profile API .\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-disable-user-profile", "parameters": [ { @@ -36961,7 +37012,7 @@ "security" ], "summary": "Disable a user profile", - "description": "Disable user profiles so that they are not visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, its automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches.\nTo re-enable a disabled user profile, use the enable user profile API .", + "description": "Disable user profiles so that they are not visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, its automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches.\nTo re-enable a disabled user profile, use the enable user profile API .\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-disable-user-profile-1", "parameters": [ { @@ -36985,7 +37036,7 @@ "security" ], "summary": "Enable users", - "description": "Enable users in the native realm.\nBy default, when you create users, they are enabled.", + "description": "Enable users in the native realm.\nBy default, when you create users, they are enabled.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-enable-user", "parameters": [ { @@ -37007,7 +37058,7 @@ "security" ], "summary": "Enable users", - "description": "Enable users in the native realm.\nBy default, when you create users, they are enabled.", + "description": "Enable users in the native realm.\nBy default, when you create users, they are enabled.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-enable-user-1", "parameters": [ { @@ -37031,7 +37082,7 @@ "security" ], "summary": "Enable a user profile", - "description": "Enable user profiles to make them visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, it's automatically enabled and visible in user profile searches.\nIf you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.", + "description": "Enable user profiles to make them visible in 
user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, it's automatically enabled and visible in user profile searches.\nIf you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-enable-user-profile", "parameters": [ { @@ -37053,7 +37104,7 @@ "security" ], "summary": "Enable a user profile", - "description": "Enable user profiles to make them visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, it's automatically enabled and visible in user profile searches.\nIf you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.", + "description": "Enable user profiles to make them visible in user profile searches.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nWhen you activate a user profile, it's automatically enabled and visible in user profile searches.\nIf you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-enable-user-profile-1", "parameters": [ { @@ -37191,7 +37242,7 @@ "security" ], "summary": "Get builtin privileges", - "description": "Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.", + "description": "Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -37258,7 +37309,7 @@ "security" ], "summary": "Get application privileges", - "description": "To use this API, you must have one of the following privileges:\n\n* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.", + "description": "To use this API, you must have one of the following privileges:\n\n* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n ##Required authorization\n* Cluster privileges: `read_security`", "externalDocs": { "url": 
"https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -37281,7 +37332,7 @@ "security" ], "summary": "Create or update application privileges", - "description": "To use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n\nApplication names are formed from a prefix, with an optional suffix that conform to the following rules:\n\n* The prefix must begin with a lowercase ASCII letter.\n* The prefix must contain only ASCII letters or digits.\n* The prefix must be at least 3 characters long.\n* If the suffix exists, it must begin with either a dash `-` or `_`.\n* The suffix cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`, `*`.\n* No part of the name can contain whitespace.\n\nPrivilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.\n\nAction names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.", + "description": "To use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n\nApplication names are formed from a prefix, with an optional suffix that conform to the following rules:\n\n* The prefix must begin with a lowercase ASCII letter.\n* The prefix must contain only ASCII letters or digits.\n* The prefix must be at least 3 characters long.\n* If the suffix exists, it must begin with either a dash `-` or `_`.\n* The suffix cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`, `*`.\n* No part of the name can contain whitespace.\n\nPrivilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.\n\nAction names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -37312,7 +37363,7 @@ "security" ], "summary": "Create or update application privileges", - "description": "To use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n\nApplication names are formed from a prefix, with an optional suffix that conform to the following rules:\n\n* The prefix must begin with a lowercase ASCII letter.\n* The prefix must contain only ASCII letters or digits.\n* The prefix must be at least 3 characters long.\n* If the suffix exists, it must begin with either a dash `-` or `_`.\n* The suffix cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`, `*`.\n* No part of the name can contain whitespace.\n\nPrivilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.\n\nAction names can contain any 
number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.", + "description": "To use this API, you must have one of the following privileges:\n\n* The `manage_security` cluster privilege (or a greater privilege such as `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n\nApplication names are formed from a prefix, with an optional suffix that conform to the following rules:\n\n* The prefix must begin with a lowercase ASCII letter.\n* The prefix must contain only ASCII letters or digits.\n* The prefix must be at least 3 characters long.\n* If the suffix exists, it must begin with either a dash `-` or `_`.\n* The suffix cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `\"`, `<`, `>`, `|`, `,`, `*`.\n* No part of the name can contain whitespace.\n\nPrivilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.\n\nAction names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -37345,7 +37396,7 @@ "security" ], "summary": "Get application privileges", - "description": "To use this API, you must have one of the following privileges:\n\n* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.", + "description": "To use this API, you must have one of the following privileges:\n\n* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).\n* The \"Manage Application Privileges\" global privilege for the application being referenced in the request.\n ##Required authorization\n* Cluster privileges: `read_security`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -37375,7 +37426,7 @@ "security" ], "summary": "Get role mappings", - "description": "Role mappings define which roles are assigned to each user.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.\nThe get role mappings API cannot retrieve role mappings that are defined in role mapping files.", + "description": "Role mappings define which roles are assigned to each user.\nThe role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.\nThe get role mappings API cannot retrieve role mappings that are defined in role mapping files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/mapping-users-groups-to-roles" }, @@ -37400,7 +37451,7 @@ "security" ], "summary": "Get service accounts", - "description": "Get a list of service accounts that match the provided path parameters.\n\nNOTE: Currently, only the `elastic/fleet-server` service account is available.", + "description": "Get a list of service accounts that match the provided path parameters.\n\nNOTE: Currently, only the `elastic/fleet-server` service account is available.\n ##Required authorization\n* Cluster privileges: 
`manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -37433,7 +37484,7 @@ "security" ], "summary": "Get service accounts", - "description": "Get a list of service accounts that match the provided path parameters.\n\nNOTE: Currently, only the `elastic/fleet-server` service account is available.", + "description": "Get a list of service accounts that match the provided path parameters.\n\nNOTE: Currently, only the `elastic/fleet-server` service account is available.\n ##Required authorization\n* Cluster privileges: `manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -37463,7 +37514,7 @@ "security" ], "summary": "Get service accounts", - "description": "Get a list of service accounts that match the provided path parameters.\n\nNOTE: Currently, only the `elastic/fleet-server` service account is available.", + "description": "Get a list of service accounts that match the provided path parameters.\n\nNOTE: Currently, only the `elastic/fleet-server` service account is available.\n ##Required authorization\n* Cluster privileges: `manage_service_account`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -37488,7 +37539,7 @@ "security" ], "summary": "Get service account credentials", - "description": "To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`).\n\nThe response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.\n\nNOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster.\nTokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.", + "description": "To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`).\n\nThe response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.\n\nNOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster.\nTokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.\n ##Required authorization\n* Cluster privileges: `read_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/service-accounts" }, @@ -37573,7 +37624,7 @@ "security" ], "summary": "Get security index settings", - "description": "Get the user-configurable settings for the security internal index (`.security` and associated indices).\nOnly a subset of the index settings — those that are user-configurable—will be shown.\nThis includes:\n\n* `index.auto_expand_replicas`\n* `index.number_of_replicas`", + "description": "Get the user-configurable settings for the security internal index (`.security` and associated indices).\nOnly a subset of the index settings — those that are user-configurable—will be shown.\nThis includes:\n\n* `index.auto_expand_replicas`\n* 
`index.number_of_replicas`\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-settings", "parameters": [ { @@ -37622,7 +37673,7 @@ "security" ], "summary": "Update security index settings", - "description": "Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`.\n\nNOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates.\n\nIf a specific index is not in use on the system and settings are provided for it, the request will be rejected.\nThis API does not yet support configuring the settings for indices before they are in use.", + "description": "Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`.\n\nNOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates.\n\nIf a specific index is not in use on the system and settings are provided for it, the request will be rejected.\nThis API does not yet support configuring the settings for indices before they are in use.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-update-settings", "parameters": [ { @@ -37708,7 +37759,7 @@ "security" ], "summary": "Get a token", - "description": "Create a bearer token for access without requiring basic authentication.\nThe tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.\nAlternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.\nWhen you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.\n\nThe get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.\n\nA successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.\n\nThe tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used.\nThat time period is defined by the `xpack.security.authc.token.timeout` setting.\nIf you want to invalidate a token immediately, you can do so by using the invalidate token API.", + "description": "Create a bearer token for access without requiring basic authentication.\nThe tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.\nAlternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.\nWhen you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.\n\nThe get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.\n\nA successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.\n\nThe tokens returned by the get token API have a finite period of time for 
which they are valid and after that time period, they can no longer be used.\nThat time period is defined by the `xpack.security.authc.token.timeout` setting.\nIf you want to invalidate a token immediately, you can do so by using the invalidate token API.\n ##Required authorization\n* Cluster privileges: `manage_token`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/security/set-up-basic-security-plus-https#encrypt-http-communication" }, @@ -37938,7 +37989,7 @@ "security" ], "summary": "Get users", - "description": "Get information about users in the native realm and built-in users.", + "description": "Get information about users in the native realm and built-in users.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-user-1", "parameters": [ { @@ -38090,7 +38141,7 @@ "security" ], "summary": "Get a user profile", - "description": "Get a user's profile using the unique profile ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "description": "Get a user's profile using the unique profile ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-user-profile", "parameters": [ { @@ -38194,7 +38245,7 @@ "security" ], "summary": "Grant an API key", - "description": "Create an API key on behalf of another user.\nThis API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API.\nThe caller must have authentication credentials for the user on whose behalf the API key will be created.\nIt is not possible to use this API to create an API key without that user's credentials.\nThe supported user authentication credential types are:\n\n* username and password\n* Elasticsearch access tokens\n* JWTs\n\nThe user, for whom the authentication credentials is provided, can optionally \"run as\" (impersonate) another user.\nIn this case, the API key will be created on behalf of the impersonated user.\n\nThis API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\n\nA successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. 
You can specify expiration information when you create the API keys.", + "description": "Create an API key on behalf of another user.\nThis API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API.\nThe caller must have authentication credentials for the user on whose behalf the API key will be created.\nIt is not possible to use this API to create an API key without that user's credentials.\nThe supported user authentication credential types are:\n\n* username and password\n* Elasticsearch access tokens\n* JWTs\n\nThe user, for whom the authentication credentials is provided, can optionally \"run as\" (impersonate) another user.\nIn this case, the API key will be created on behalf of the impersonated user.\n\nThis API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\n\nA successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nBy default, API keys never expire. You can specify expiration information when you create the API keys.\n ##Required authorization\n* Cluster privileges: `grant_api_key`", "operationId": "security-grant-api-key", "requestBody": { "content": { @@ -38411,7 +38462,7 @@ "security" ], "summary": "Check user profile privileges", - "description": "Determine whether the users associated with the specified user profile IDs have all the requested privileges.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "description": "Determine whether the users associated with the specified user profile IDs have all the requested privileges.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n ##Required authorization\n* Cluster privileges: `read_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/user-profiles" }, @@ -38437,7 +38488,7 @@ "security" ], "summary": "Check user profile privileges", - "description": "Determine whether the users associated with the specified user profile IDs have all the requested privileges.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "description": "Determine whether the users associated with the specified user profile IDs have all the requested privileges.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. 
Individual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n ##Required authorization\n* Cluster privileges: `read_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/user-profiles" }, @@ -38741,7 +38792,7 @@ "security" ], "summary": "Find API keys with a query", - "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`,`read_security`", "operationId": "security-query-api-keys", "parameters": [ { @@ -38775,7 +38826,7 @@ "security" ], "summary": "Find API keys with a query", - "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`,`read_security`", "operationId": "security-query-api-keys-1", "parameters": [ { @@ -38811,7 +38862,7 @@ "security" ], "summary": "Find roles with a query", - "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.", + "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in 
ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-query-role", "requestBody": { "$ref": "#/components/requestBodies/security.query_role" @@ -38834,7 +38885,7 @@ "security" ], "summary": "Find roles with a query", - "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.", + "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-query-role-1", "requestBody": { "$ref": "#/components/requestBodies/security.query_role" @@ -38859,7 +38910,7 @@ "security" ], "summary": "Find users with a query", - "description": "Get information for users in a paginated manner.\nYou can optionally filter the results with a query.\n\nNOTE: As opposed to the get user API, built-in users are excluded from the result.\nThis API is only for native users.", + "description": "Get information for users in a paginated manner.\nYou can optionally filter the results with a query.\n\nNOTE: As opposed to the get user API, built-in users are excluded from the result.\nThis API is only for native users.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-query-user", "parameters": [ { @@ -38887,7 +38938,7 @@ "security" ], "summary": "Find users with a query", - "description": "Get information for users in a paginated manner.\nYou can optionally filter the results with a query.\n\nNOTE: As opposed to the get user API, built-in users are excluded from the result.\nThis API is only for native users.", + "description": "Get information for users in a paginated manner.\nYou can optionally filter the results with a query.\n\nNOTE: As opposed to the get user API, built-in users are excluded from the result.\nThis API is only for native users.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-query-user-1", "parameters": [ { @@ -39403,7 +39454,7 @@ "security" ], "summary": "Suggest a user profile", - "description": "Get suggestions for user profiles that match specified search criteria.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "description": "Get suggestions for user profiles that match specified search criteria.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future 
releases without prior notice.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-suggest-user-profiles", "parameters": [ { @@ -39431,7 +39482,7 @@ "security" ], "summary": "Suggest a user profile", - "description": "Get suggestions for user profiles that match specified search criteria.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.", + "description": "Get suggestions for user profiles that match specified search criteria.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-suggest-user-profiles-1", "parameters": [ { @@ -39461,7 +39512,7 @@ "security" ], "summary": "Update an API key", - "description": "Update attributes of an existing API key.\nThis API supports updates to an API key's access scope, expiration, and metadata.\n\nTo use this API, you must have at least the `manage_own_api_key` cluster privilege.\nUsers can only update API keys that they created or that were granted to them.\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.\n\nUse this API to update API keys created by the create API key or grant API Key APIs.\nIf you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.\nIt's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.\n\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.\nThis change can occur if the owner user's permissions have changed since the API key was created or last modified.", + "description": "Update attributes of an existing API key.\nThis API supports updates to an API key's access scope, expiration, and metadata.\n\nTo use this API, you must have at least the `manage_own_api_key` cluster privilege.\nUsers can only update API keys that they created or that were granted to them.\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API. 
The owner user’s credentials are required.\n\nUse this API to update API keys created by the create API key or grant API Key APIs.\nIf you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.\nIt's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.\n\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.\nThis change can occur if the owner user's permissions have changed since the API key was created or last modified.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "operationId": "security-update-api-key", "parameters": [ { @@ -39555,7 +39606,7 @@ "security" ], "summary": "Update a cross-cluster API key", - "description": "Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.\n\nTo use this API, you must have at least the `manage_security` cluster privilege.\nUsers can only update API keys that they created.\nTo update another user's API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API.\nTo update an API key, the owner user's credentials are required.\n\nIt's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.\n\nThis API supports updates to an API key's access scope, metadata, and expiration.\nThe owner user's information, such as the `username` and `realm`, is also updated automatically on every call.\n\nNOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.", + "description": "Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.\n\nTo use this API, you must have at least the `manage_security` cluster privilege.\nUsers can only update API keys that they created.\nTo update another user's API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API.\nTo update an API key, the owner user's credentials are required.\n\nIt's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.\n\nThis API supports updates to an API key's access scope, metadata, and expiration.\nThe owner user's information, such as the `username` and `realm`, is also updated automatically on every call.\n\nNOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-api-key" }, @@ -39645,7 +39696,7 @@ "security" ], "summary": "Update user profile data", - "description": "Update specific data for the user profile that is associated with a unique ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security 
solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nTo use this API, you must have one of the following privileges:\n\n* The `manage_user_profile` cluster privilege.\n* The `update_profile_data` global privilege for the namespaces that are referenced in the request.\n\nThis API updates the `labels` and `data` fields of an existing user profile document with JSON objects.\nNew keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.\n\nFor both labels and data, content is namespaced by the top-level fields.\nThe `update_profile_data` global privilege grants privileges for updating only the allowed namespaces.", + "description": "Update specific data for the user profile that is associated with a unique ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nTo use this API, you must have one of the following privileges:\n\n* The `manage_user_profile` cluster privilege.\n* The `update_profile_data` global privilege for the namespaces that are referenced in the request.\n\nThis API updates the `labels` and `data` fields of an existing user profile document with JSON objects.\nNew keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.\n\nFor both labels and data, content is namespaced by the top-level fields.\nThe `update_profile_data` global privilege grants privileges for updating only the allowed namespaces.\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-update-user-profile-data", "parameters": [ { @@ -39682,7 +39733,7 @@ "security" ], "summary": "Update user profile data", - "description": "Update specific data for the user profile that is associated with a unique ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change or remove this feature in future releases without prior notice.\n\nTo use this API, you must have one of the following privileges:\n\n* The `manage_user_profile` cluster privilege.\n* The `update_profile_data` global privilege for the namespaces that are referenced in the request.\n\nThis API updates the `labels` and `data` fields of an existing user profile document with JSON objects.\nNew keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.\n\nFor both labels and data, content is namespaced by the top-level fields.\nThe `update_profile_data` global privilege grants privileges for updating only the allowed namespaces.", + "description": "Update specific data for the user profile that is associated with a unique ID.\n\nNOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.\nIndividual users and external applications should not call this API directly.\nElastic reserves the right to change 
or remove this feature in future releases without prior notice.\n\nTo use this API, you must have one of the following privileges:\n\n* The `manage_user_profile` cluster privilege.\n* The `update_profile_data` global privilege for the namespaces that are referenced in the request.\n\nThis API updates the `labels` and `data` fields of an existing user profile document with JSON objects.\nNew keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.\n\nFor both labels and data, content is namespaced by the top-level fields.\nThe `update_profile_data` global privilege grants privileges for updating only the allowed namespaces.\n ##Required authorization\n* Cluster privileges: `manage_user_profile`", "operationId": "security-update-user-profile-data-1", "parameters": [ { @@ -39721,7 +39772,7 @@ "shutdown" ], "summary": "Get the shutdown status", - "description": "Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.\nThe API returns status information for each part of the shut down process.\n\nNOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.", + "description": "Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.\nThe API returns status information for each part of the shut down process.\n\nNOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "shutdown-get-node-1", "parameters": [ { @@ -39749,7 +39800,7 @@ "shutdown" ], "summary": "Prepare a node to be shut down", - "description": "NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.\n\nIf you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.\n\nThe API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.\nThis ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.\n\nYou must specify the type of shutdown: `restart`, `remove`, or `replace`.\nIf a node is already being prepared for shutdown, you can use this API to change the shutdown type.\n\nIMPORTANT: This API does NOT terminate the Elasticsearch process.\nMonitor the node shutdown status to determine when it is safe to stop Elasticsearch.", + "description": "NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. 
Direct use is not supported.\n\nIf you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.\n\nThe API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.\nThis ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.\n\nYou must specify the type of shutdown: `restart`, `remove`, or `replace`.\nIf a node is already being prepared for shutdown, you can use this API to change the shutdown type.\n\nIMPORTANT: This API does NOT terminate the Elasticsearch process.\nMonitor the node shutdown status to determine when it is safe to stop Elasticsearch.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "shutdown-put-node", "parameters": [ { @@ -39846,7 +39897,7 @@ "shutdown" ], "summary": "Cancel node shutdown preparations", - "description": "Remove a node from the shutdown list so it can resume normal operations.\nYou must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.\nShutdown requests are never removed automatically by Elasticsearch.\n\nNOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.\nDirect use is not supported.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.", + "description": "Remove a node from the shutdown list so it can resume normal operations.\nYou must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.\nShutdown requests are never removed automatically by Elasticsearch.\n\nNOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.\nDirect use is not supported.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "shutdown-delete-node", "parameters": [ { @@ -39914,7 +39965,7 @@ "shutdown" ], "summary": "Get the shutdown status", - "description": "Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.\nThe API returns status information for each part of the shut down process.\n\nNOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.", + "description": "Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.\nThe API returns status information for each part of the shut down process.\n\nNOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. 
Direct use is not supported.\n\nIf the operator privileges feature is enabled, you must be an operator to use this API.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "shutdown-get-node", "parameters": [ { @@ -39941,7 +39992,7 @@ "ingest" ], "summary": "Simulate data ingestion", - "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. 
The pipeline substitutions are used only within this request.", + "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.\n ##Required authorization\n* Index privileges: `index`", "operationId": "simulate-ingest", "parameters": [ { @@ -39969,7 +40020,7 @@ "ingest" ], "summary": "Simulate data ingestion", - "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese 
will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.", + "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. 
The pipeline substitutions are used only within this request.\n ##Required authorization\n* Index privileges: `index`", "operationId": "simulate-ingest-1", "parameters": [ { @@ -39999,7 +40050,7 @@ "ingest" ], "summary": "Simulate data ingestion", - "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. 
The pipeline substitutions are used only within this request.", + "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.\n ##Required authorization\n* Index privileges: `index`", "operationId": "simulate-ingest-2", "parameters": [ { @@ -40030,7 +40081,7 @@ "ingest" ], "summary": "Simulate data ingestion", - "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese 
will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.", + "description": "Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.\n\nThis API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.\n\nThe API runs the default and final pipeline for that index against a set of documents provided in the body of the request.\nIf a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.\nNo data is indexed into Elasticsearch.\nInstead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.\nThe transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.\n\nThis API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.\nThe simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.\n\nBy default, the pipeline definitions that are currently in the system are used.\nHowever, you can supply substitute pipeline definitions in the body of the request.\nThese will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. 
The pipeline substitutions are used only within this request.\n ##Required authorization\n* Index privileges: `index`", "operationId": "simulate-ingest-3", "parameters": [ { @@ -40063,7 +40114,7 @@ "slm" ], "summary": "Get policy information", - "description": "Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.", + "description": "Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.\n ##Required authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-get-lifecycle", "parameters": [ { @@ -40094,7 +40145,7 @@ "slm" ], "summary": "Create or update a policy", - "description": "Create or update a snapshot lifecycle policy.\nIf the policy already exists, this request increments the policy version.\nOnly the latest version of a policy is stored.", + "description": "Create or update a snapshot lifecycle policy.\nIf the policy already exists, this request increments the policy version.\nOnly the latest version of a policy is stored.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage_slm`", "operationId": "slm-put-lifecycle", "parameters": [ { @@ -40193,7 +40244,7 @@ "slm" ], "summary": "Delete a policy", - "description": "Delete a snapshot lifecycle policy definition.\nThis operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.", + "description": "Delete a snapshot lifecycle policy definition.\nThis operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.\n ##Required authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-delete-lifecycle", "parameters": [ { @@ -40249,7 +40300,7 @@ "slm" ], "summary": "Run a policy", - "description": "Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time.\nThe snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.", + "description": "Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time.\nThe snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.\n ##Required authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-execute-lifecycle", "parameters": [ { @@ -40325,7 +40376,7 @@ "slm" ], "summary": "Run a retention policy", - "description": "Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules.\nThe retention policy is normally applied according to its schedule.", + "description": "Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules.\nThe retention policy is normally applied according to its schedule.\n ##Required authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-execute-retention", "parameters": [ { @@ -40370,7 +40421,7 @@ "slm" ], "summary": "Get policy information", - "description": "Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.", + "description": "Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.\n ##Required 
authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-get-lifecycle-1", "parameters": [ { @@ -40400,7 +40451,7 @@ "slm" ], "summary": "Get snapshot lifecycle management statistics", - "description": "Get global and policy-level statistics about actions taken by snapshot lifecycle management.", + "description": "Get global and policy-level statistics about actions taken by snapshot lifecycle management.\n ##Required authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-get-stats", "parameters": [ { @@ -40504,6 +40555,7 @@ "slm" ], "summary": "Get the snapshot lifecycle management status", + "description": "\n ##Required authorization\n* Cluster privileges: `read_slm`", "operationId": "slm-get-status", "parameters": [ { @@ -40568,7 +40620,7 @@ "slm" ], "summary": "Start snapshot lifecycle management", - "description": "Snapshot lifecycle management (SLM) starts automatically when a cluster is formed.\nManually starting SLM is necessary only if it has been stopped using the stop SLM API.", + "description": "Snapshot lifecycle management (SLM) starts automatically when a cluster is formed.\nManually starting SLM is necessary only if it has been stopped using the stop SLM API.\n ##Required authorization\n* Cluster privileges: `manage_slm`", "operationId": "slm-start", "parameters": [ { @@ -40670,7 +40722,7 @@ "snapshot" ], "summary": "Clean up the snapshot repository", - "description": "Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.", + "description": "Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/self-managed#snapshots-repository-cleanup" }, @@ -40749,7 +40801,7 @@ "snapshot" ], "summary": "Clone a snapshot", - "description": "Clone part or all of a snapshot into another snapshot in the same repository.", + "description": "Clone part or all of a snapshot into another snapshot in the same repository.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "snapshot-clone", "parameters": [ { @@ -40848,7 +40900,7 @@ "snapshot" ], "summary": "Get snapshot information", - "description": "NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.\nIt is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.\nSnapshots concurrently created may be seen during an iteration.", + "description": "NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots.\nIt is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.\nSnapshots concurrently created may be seen during an iteration.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "snapshot-get", "parameters": [ { @@ -41067,7 +41119,7 @@ "snapshot" ], "summary": "Create a snapshot", - "description": "Take a snapshot of a cluster or of data streams and indices.", + "description": "Take a snapshot of a cluster or of data streams and indices.\n ##Required authorization\n* Cluster privileges: `create_snapshot`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/create-snapshots" }, @@ -41107,7 +41159,7 @@ "snapshot" ], "summary": "Create a snapshot", - "description": "Take a snapshot of a cluster or of data streams and indices.", + "description": "Take a snapshot of a cluster or of data streams and indices.\n ##Required authorization\n* Cluster privileges: `create_snapshot`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/create-snapshots" }, @@ -41147,6 +41199,7 @@ "snapshot" ], "summary": "Delete snapshots", + "description": "\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "snapshot-delete", "parameters": [ { @@ -41215,6 +41268,7 @@ "snapshot" ], "summary": "Get snapshot repository information", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "snapshot-get-repository-1", "parameters": [ { @@ -41245,7 +41299,7 @@ "snapshot" ], "summary": "Create or update a snapshot repository", - "description": "IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.\nTo register a snapshot repository, the cluster's global metadata must be writeable.\nEnsure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.\n\nSeveral options for this API can be specified using a query parameter or a request body parameter.\nIf both parameters are specified, only the query parameter is used.", + "description": "IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.\nTo register a snapshot repository, the cluster's global metadata must be writeable.\nEnsure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.\n\nSeveral options for this API can be specified using a query parameter or a request body parameter.\nIf both parameters are specified, only the query parameter is used.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/self-managed" }, @@ -41285,7 +41339,7 @@ "snapshot" ], "summary": "Create or update a snapshot repository", - "description": "IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.\nTo register a snapshot repository, the cluster's global metadata must be writeable.\nEnsure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.\n\nSeveral options for this API can be specified using a query parameter or a 
request body parameter.\nIf both parameters are specified, only the query parameter is used.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/self-managed" }, @@ -41325,7 +41379,7 @@ "snapshot" ], "summary": "Delete snapshot repositories", - "description": "When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.\nThe snapshots themselves are left untouched and in place.", + "description": "When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.\nThe snapshots themselves are left untouched and in place.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "snapshot-delete-repository", "parameters": [ { @@ -41381,6 +41435,7 @@ "snapshot" ], "summary": "Get snapshot repository information", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "snapshot-get-repository", "parameters": [ { @@ -41410,7 +41465,7 @@ "snapshot" ], "summary": "Analyze a snapshot repository", - "description": "Performs operations on a snapshot repository in order to check for incorrect behaviour.\n\nThere are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch.\nSome storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do.\nThis API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.\n\nThe default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations.\nRun your first analysis with the default parameter values to check for simple problems.\nSome repositories may behave correctly when lightly loaded but incorrectly under production-like workloads.\nIf the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`.\nAlways specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion.\nSome repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster.\nPerform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.\n\nIf the analysis fails, Elasticsearch detected that your repository behaved unexpectedly.\nThis usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support.\nIf so, this storage system is not suitable for use as a snapshot repository.\nRepository analysis triggers conditions that occur only rarely when taking snapshots in a production system.\nSnapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis 
failures.\nHowever your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis.\nYou can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using.\nFor instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis.\nThis allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch.\nPlease do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol.\nYou will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.\n\nIf the analysis is successful, the API returns details of the testing process, optionally including how long each operation took.\nYou can use this information to determine the performance of your storage system.\nIf any operation fails or returns an incorrect result, the API returns an error.\nIf the API returns an error, it may not have removed all the data it wrote to the repository.\nThe error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs.\nYou should verify that this location has been cleaned up correctly.\nIf there is still leftover data at the specified location, you should manually remove it.\n\nIf the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled.\nSome clients are configured to close their connection if no response is received within a certain timeout.\nAn analysis takes a long time to complete so you might need to relax any such client-side timeouts.\nOn cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all.\nThe path to the leftover data is recorded in the Elasticsearch logs.\nYou should verify that this location has been cleaned up correctly.\nIf there is still leftover data at the specified location, you should manually remove it.\n\nIf the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed.\nThe analysis attempts to detect common bugs but it does not offer 100% coverage.\nAdditionally, it does not test the following:\n\n* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.\n* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.\n* Your repository must behave correctly even if connectivity from the cluster is disrupted. 
Reads and writes may fail in this case, but they must not return incorrect results.\n\nIMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again.\nThis consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself.\nYou must ensure this load does not affect other users of these systems.\nAnalyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume.\n\nNOTE: This API is intended for exploratory use by humans.\nYou should expect the request parameters and the response format to vary in future versions.\nThe response exposes immplementation details of the analysis which may change from version to version.\n\nNOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones.\nA storage system that passes repository analysis with one version of Elasticsearch may fail with a different version.\nThis indicates it behaves incorrectly in ways that the former version did not detect.\nYou must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.\n\nNOTE: This API may not work correctly in a mixed-version cluster.\n\n*Implementation details*\n\nNOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions.\nThe request parameters and response format depend on details of the implementation so may also be different in newer versions.\n\nThe analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter.\nThese tasks are distributed over the data and master-eligible nodes in the cluster for execution.\n\nFor most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.\nThe size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters.\nIf any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.\n\nFor some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes.\nThese reads are permitted to fail, but must not return partial data.\nIf any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.\n\nFor some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.\nIn this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.\nIf any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.\n\nThe executing node will use a variety of different methods to write the blob.\nFor instance, where applicable, it will use 
both single-part and multi-part uploads.\nSimilarly, the reading nodes will use a variety of different methods to read the data back again.\nFor instance they may read the entire blob from start to end or may read only a subset of the data.\n\nFor some blob-level tasks, the executing node will cancel the write before it is complete.\nIn this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.\n\nLinearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.\nThis operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.\nThe detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.\nRepository analysis verifies that that uncontended compare-and-exchange operations on a linearizable register blob always succeed.\nRepository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.\nIf an operation fails due to contention, Elasticsearch retries the operation until it succeeds.\nMost of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.\nSome operations also verify the behavior on small blobs with sizes other than 8 bytes.", + "description": "Performs operations on a snapshot repository in order to check for incorrect behaviour.\n\nThere are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch.\nSome storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do.\nThis API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.\n\nThe default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations.\nRun your first analysis with the default parameter values to check for simple problems.\nSome repositories may behave correctly when lightly loaded but incorrectly under production-like workloads.\nIf the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`.\nAlways specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion.\nSome repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster.\nPerform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.\n\nIf the analysis fails, Elasticsearch detected that your repository behaved unexpectedly.\nThis usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support.\nIf so, this storage system is not suitable for use as a snapshot 
repository.\nRepository analysis triggers conditions that occur only rarely when taking snapshots in a production system.\nSnapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures.\nHowever your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis.\nYou can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using.\nFor instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis.\nThis allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch.\nPlease do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol.\nYou will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.\n\nIf the analysis is successful, the API returns details of the testing process, optionally including how long each operation took.\nYou can use this information to determine the performance of your storage system.\nIf any operation fails or returns an incorrect result, the API returns an error.\nIf the API returns an error, it may not have removed all the data it wrote to the repository.\nThe error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs.\nYou should verify that this location has been cleaned up correctly.\nIf there is still leftover data at the specified location, you should manually remove it.\n\nIf the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled.\nSome clients are configured to close their connection if no response is received within a certain timeout.\nAn analysis takes a long time to complete so you might need to relax any such client-side timeouts.\nOn cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all.\nThe path to the leftover data is recorded in the Elasticsearch logs.\nYou should verify that this location has been cleaned up correctly.\nIf there is still leftover data at the specified location, you should manually remove it.\n\nIf the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed.\nThe analysis attempts to detect common bugs but it does not offer 100% coverage.\nAdditionally, it does not test the following:\n\n* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.\n* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.\n* Your repository must behave correctly even if connectivity from the cluster is disrupted. 
Reads and writes may fail in this case, but they must not return incorrect results.\n\nIMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again.\nThis consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself.\nYou must ensure this load does not affect other users of these systems.\nAnalyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume.\n\nNOTE: This API is intended for exploratory use by humans.\nYou should expect the request parameters and the response format to vary in future versions.\nThe response exposes immplementation details of the analysis which may change from version to version.\n\nNOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones.\nA storage system that passes repository analysis with one version of Elasticsearch may fail with a different version.\nThis indicates it behaves incorrectly in ways that the former version did not detect.\nYou must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.\n\nNOTE: This API may not work correctly in a mixed-version cluster.\n\n*Implementation details*\n\nNOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions.\nThe request parameters and response format depend on details of the implementation so may also be different in newer versions.\n\nThe analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter.\nThese tasks are distributed over the data and master-eligible nodes in the cluster for execution.\n\nFor most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.\nThe size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters.\nIf any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.\n\nFor some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes.\nThese reads are permitted to fail, but must not return partial data.\nIf any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.\n\nFor some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.\nIn this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.\nIf any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.\n\nThe executing node will use a variety of different methods to write the blob.\nFor instance, where applicable, it will use 
both single-part and multi-part uploads.\nSimilarly, the reading nodes will use a variety of different methods to read the data back again.\nFor instance they may read the entire blob from start to end or may read only a subset of the data.\n\nFor some blob-level tasks, the executing node will cancel the write before it is complete.\nIn this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.\n\nLinearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.\nThis operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.\nThe detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.\nRepository analysis verifies that that uncontended compare-and-exchange operations on a linearizable register blob always succeed.\nRepository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.\nIf an operation fails due to contention, Elasticsearch retries the operation until it succeeds.\nMost of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.\nSome operations also verify the behavior on small blobs with sizes other than 8 bytes.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "snapshot-repository-analyze", "parameters": [ { @@ -41670,7 +41725,7 @@ "snapshot" ], "summary": "Verify the repository integrity", - "description": "Verify the integrity of the contents of a snapshot repository.\n\nThis API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.\n\nIf you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity.\nUntil you do so:\n\n* It may not be possible to restore some snapshots from this repository.\n* Searchable snapshots may report errors when searched or may have unassigned shards.\n* Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.\n* Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.\n* Continuing to write to the repository while it is in an invalid state may causing additional damage to its contents.\n\nIf the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage.\nThe only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred.\nYou must also identify what caused the damage and take action to prevent it from happening again.\n\nIf you cannot restore a repository backup, register a new repository and use this for all future snapshot operations.\nIn some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the 
restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository.\n\nAvoid all operations which write to the repository while the verify repository integrity API is running.\nIf something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes.\nIt may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.\n\nNOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.\n\nNOTE: This API may not work correctly in a mixed-version cluster.\n\nThe default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster.\nFor instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool.\nIf you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster.\nFor large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.\n\nThe response exposes implementation details of the analysis which may change from version to version.\nThe response body format is therefore not considered stable and may be different in newer versions.", + "description": "Verify the integrity of the contents of a snapshot repository.\n\nThis API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.\n\nIf you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity.\nUntil you do so:\n\n* It may not be possible to restore some snapshots from this repository.\n* Searchable snapshots may report errors when searched or may have unassigned shards.\n* Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.\n* Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.\n* Continuing to write to the repository while it is in an invalid state may causing additional damage to its contents.\n\nIf the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage.\nThe only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred.\nYou must also identify what caused the damage and take action to prevent it from happening again.\n\nIf you cannot restore a repository backup, register a new repository and use this for all future snapshot operations.\nIn some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data 
from any searchable snapshots mounted from the damaged repository.\n\nAvoid all operations which write to the repository while the verify repository integrity API is running.\nIf something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes.\nIt may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.\n\nNOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.\n\nNOTE: This API may not work correctly in a mixed-version cluster.\n\nThe default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster.\nFor instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool.\nIf you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster.\nFor large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.\n\nThe response exposes implementation details of the analysis which may change from version to version.\nThe response body format is therefore not considered stable and may be different in newer versions.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "snapshot-repository-verify-integrity", "parameters": [ { @@ -41786,7 +41841,7 @@ "snapshot" ], "summary": "Restore a snapshot", - "description": "Restore a snapshot of a cluster or data streams and indices.\n\nYou can restore a snapshot only to a running cluster with an elected master node.\nThe snapshot repository must be registered and available to the cluster.\nThe snapshot and cluster versions must be compatible.\n\nTo restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.\n\nBefore you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:\n\n```\nGET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream\n```\n\nIf no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.\n\nIf your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.", + "description": "Restore a snapshot of a cluster or data streams and indices.\n\nYou can restore a snapshot only to a running cluster with an elected master node.\nThe snapshot repository must be registered and available to the cluster.\nThe snapshot and cluster versions must be compatible.\n\nTo restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.\n\nBefore you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. 
To check, use the index management feature in Kibana or the get index template API:\n\n```\nGET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream\n```\n\nIf no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.\n\nIf your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/restore-snapshot" }, @@ -41943,7 +41998,7 @@ "snapshot" ], "summary": "Get the snapshot status", - "description": "Get a detailed description of the current state for each shard participating in the snapshot.\n\nNote that this API should be used only to obtain detailed shard-level information for ongoing snapshots.\nIf this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.\n\nIf you omit the `` request path parameter, the request retrieves information only for currently running snapshots.\nThis usage is preferred.\nIf needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running.\n\nWARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.\nThe API requires a read from the repository for each shard in each snapshot.\nFor example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).\n\nDepending on the latency of your storage, such requests can take an extremely long time to return results.\nThese requests can also tax machine resources and, when using cloud storage, incur high processing costs.", + "description": "Get a detailed description of the current state for each shard participating in the snapshot.\n\nNote that this API should be used only to obtain detailed shard-level information for ongoing snapshots.\nIf this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.\n\nIf you omit the `` request path parameter, the request retrieves information only for currently running snapshots.\nThis usage is preferred.\nIf needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running.\n\nWARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.\nThe API requires a read from the repository for each shard in each snapshot.\nFor example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).\n\nDepending on the latency of your storage, such requests can take an extremely long time to return results.\nThese requests can also tax machine resources and, when using cloud storage, incur high processing costs.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "snapshot-status", "parameters": [ { @@ -41973,7 +42028,7 @@ "snapshot" ], "summary": "Get the snapshot status", - "description": "Get a detailed description of the current state for each shard participating in the 
snapshot.\n\nNote that this API should be used only to obtain detailed shard-level information for ongoing snapshots.\nIf this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.\n\nIf you omit the `` request path parameter, the request retrieves information only for currently running snapshots.\nThis usage is preferred.\nIf needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running.\n\nWARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.\nThe API requires a read from the repository for each shard in each snapshot.\nFor example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).\n\nDepending on the latency of your storage, such requests can take an extremely long time to return results.\nThese requests can also tax machine resources and, when using cloud storage, incur high processing costs.", + "description": "Get a detailed description of the current state for each shard participating in the snapshot.\n\nNote that this API should be used only to obtain detailed shard-level information for ongoing snapshots.\nIf this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.\n\nIf you omit the `` request path parameter, the request retrieves information only for currently running snapshots.\nThis usage is preferred.\nIf needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running.\n\nWARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.\nThe API requires a read from the repository for each shard in each snapshot.\nFor example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).\n\nDepending on the latency of your storage, such requests can take an extremely long time to return results.\nThese requests can also tax machine resources and, when using cloud storage, incur high processing costs.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "snapshot-status-1", "parameters": [ { @@ -42006,7 +42061,7 @@ "snapshot" ], "summary": "Get the snapshot status", - "description": "Get a detailed description of the current state for each shard participating in the snapshot.\n\nNote that this API should be used only to obtain detailed shard-level information for ongoing snapshots.\nIf this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.\n\nIf you omit the `` request path parameter, the request retrieves information only for currently running snapshots.\nThis usage is preferred.\nIf needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running.\n\nWARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.\nThe API requires a read from the repository for each shard in each snapshot.\nFor example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).\n\nDepending on the latency of your storage, such requests can take an 
extremely long time to return results.\nThese requests can also tax machine resources and, when using cloud storage, incur high processing costs.", + "description": "Get a detailed description of the current state for each shard participating in the snapshot.\n\nNote that this API should be used only to obtain detailed shard-level information for ongoing snapshots.\nIf this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.\n\nIf you omit the `` request path parameter, the request retrieves information only for currently running snapshots.\nThis usage is preferred.\nIf needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running.\n\nWARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.\nThe API requires a read from the repository for each shard in each snapshot.\nFor example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).\n\nDepending on the latency of your storage, such requests can take an extremely long time to return results.\nThese requests can also tax machine resources and, when using cloud storage, incur high processing costs.\n ##Required authorization\n* Cluster privileges: `monitor_snapshot`", "operationId": "snapshot-status-2", "parameters": [ { @@ -42042,7 +42097,7 @@ "snapshot" ], "summary": "Verify a snapshot repository", - "description": "Check for common misconfigurations in a snapshot repository.", + "description": "Check for common misconfigurations in a snapshot repository.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/tools/snapshot-and-restore/self-managed#snapshots-repository-verification" }, @@ -42174,7 +42229,7 @@ "sql" ], "summary": "Delete an async SQL search", - "description": "Delete an async SQL search or a stored synchronous SQL search.\nIf the search is still running, the API cancels it.\n\nIf the Elasticsearch security features are enabled, only the following users can use this API to delete a search:\n\n* Users with the `cancel_task` cluster privilege.\n* The user who first submitted the search.", + "description": "Delete an async SQL search or a stored synchronous SQL search.\nIf the search is still running, the API cancels it.\n\nIf the Elasticsearch security features are enabled, only the following users can use this API to delete a search:\n\n* Users with the `cancel_task` cluster privilege.\n* The user who first submitted the search.\n ##Required authorization\n* Cluster privileges: `cancel_task`", "operationId": "sql-delete-async", "parameters": [ { @@ -42323,7 +42378,7 @@ "sql" ], "summary": "Get the async SQL search status", - "description": "Get the current status of an async SQL search or a stored synchronous SQL search.", + "description": "Get the current status of an async SQL search or a stored synchronous SQL search.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "sql-get-async-status", "parameters": [ { @@ -42389,7 +42444,7 @@ "sql" ], "summary": "Get SQL search results", - "description": "Run an SQL request.", + "description": "Run an SQL request.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-query-1", "parameters": [ { @@ -42417,7 +42472,7 @@ "sql" ], "summary": "Get SQL search results", - "description": 
"Run an SQL request.", + "description": "Run an SQL request.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-query", "parameters": [ { @@ -42447,7 +42502,7 @@ "sql" ], "summary": "Translate SQL into Elasticsearch queries", - "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.", + "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-translate-1", "requestBody": { "$ref": "#/components/requestBodies/sql.translate" @@ -42470,7 +42525,7 @@ "sql" ], "summary": "Translate SQL into Elasticsearch queries", - "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.", + "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-translate", "requestBody": { "$ref": "#/components/requestBodies/sql.translate" @@ -42495,7 +42550,7 @@ "security" ], "summary": "Get SSL certificates", - "description": "Get information about the X.509 certificates that are used to encrypt communications in the cluster.\nThe API returns a list that includes certificates from all TLS contexts including:\n\n- Settings for transport and HTTP interfaces\n- TLS settings that are used within authentication realms\n- TLS settings for remote monitoring exporters\n\nThe list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.\nIt also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`.\n\nThe list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.\n\nNOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.\n\nIf Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.", + "description": "Get information about the X.509 certificates that are used to encrypt communications in the cluster.\nThe API returns a list that includes certificates from all TLS contexts including:\n\n- Settings for transport and HTTP interfaces\n- TLS settings that are used within authentication realms\n- TLS settings for remote monitoring exporters\n\nThe list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.\nIt also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate 
settings`.\n\nThe list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.\n\nNOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.\n\nIf Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.\n ##Required authorization\n* Cluster privileges: `monitor`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/security/set-up-basic-security#encrypt-internode-communication" }, @@ -42536,6 +42591,7 @@ "synonyms" ], "summary": "Get a synonym set", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-get-synonym", "parameters": [ { @@ -42618,7 +42674,7 @@ "synonyms" ], "summary": "Create or update a synonym set", - "description": "Synonyms sets are limited to a maximum of 10,000 synonym rules per set.\nIf you need to manage more synonym rules, you can create multiple synonym sets.\n\nWhen an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.\nThis is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.", + "description": "Synonyms sets are limited to a maximum of 10,000 synonym rules per set.\nIf you need to manage more synonym rules, you can create multiple synonym sets.\n\nWhen an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.\nThis is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-put-synonym", "parameters": [ { @@ -42702,7 +42758,7 @@ "synonyms" ], "summary": "Delete a synonym set", - "description": "You can only delete a synonyms set that is not in use by any index analyzer.\n\nSynonyms sets can be used in synonym graph token filters and synonym token filters.\nThese synonym filters can be used as part of search analyzers.\n\nAnalyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).\nEven if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.\n\nIf any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.\nTo prevent that, synonyms sets that are used in analyzers can't be deleted.\nA delete request in this case will return a 400 response code.\n\nTo remove a synonyms set, you must first remove all indices that contain analyzers using it.\nYou can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.\nOnce finished, you can delete the index.\nWhen the synonyms set is not used in analyzers, you will be able to delete it.", + "description": "You can only delete a synonyms set that is not in use by any index analyzer.\n\nSynonyms sets can be used in synonym graph token filters and synonym token filters.\nThese synonym filters can be used as part 
of search analyzers.\n\nAnalyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).\nEven if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.\n\nIf any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.\nTo prevent that, synonyms sets that are used in analyzers can't be deleted.\nA delete request in this case will return a 400 response code.\n\nTo remove a synonyms set, you must first remove all indices that contain analyzers using it.\nYou can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.\nOnce finished, you can delete the index.\nWhen the synonyms set is not used in analyzers, you will be able to delete it.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-delete-synonym", "parameters": [ { @@ -42738,7 +42794,7 @@ "synonyms" ], "summary": "Get a synonym rule", - "description": "Get a synonym rule from a synonym set.", + "description": "Get a synonym rule from a synonym set.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-get-synonym-rule", "parameters": [ { @@ -42795,7 +42851,7 @@ "synonyms" ], "summary": "Create or update a synonym rule", - "description": "Create or update a synonym rule in a synonym set.\n\nIf any of the synonym rules included is invalid, the API returns an error.\n\nWhen you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.", + "description": "Create or update a synonym rule in a synonym set.\n\nIf any of the synonym rules included is invalid, the API returns an error.\n\nWhen you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-put-synonym-rule", "parameters": [ { @@ -42887,7 +42943,7 @@ "synonyms" ], "summary": "Delete a synonym rule", - "description": "Delete a synonym rule from a synonym set.", + "description": "Delete a synonym rule from a synonym set.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-delete-synonym-rule", "parameters": [ { @@ -42956,7 +43012,7 @@ "synonyms" ], "summary": "Get all synonym sets", - "description": "Get a summary of all defined synonym sets.", + "description": "Get a summary of all defined synonym sets.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-get-synonyms-sets", "parameters": [ { @@ -43030,7 +43086,7 @@ "tasks" ], "summary": "Cancel a task", - "description": "WARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nA task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.\nIt is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.\nThe get task information API will continue to list these cancelled tasks until they complete.\nThe cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as 
soon as possible.\n\nTo troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.\nYou can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.", + "description": "WARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nA task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.\nIt is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.\nThe get task information API will continue to list these cancelled tasks until they complete.\nThe cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.\n\nTo troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.\nYou can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "tasks-cancel", "parameters": [ { @@ -43060,7 +43116,7 @@ "tasks" ], "summary": "Cancel a task", - "description": "WARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nA task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.\nIt is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.\nThe get task information API will continue to list these cancelled tasks until they complete.\nThe cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.\n\nTo troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.\nYou can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.", + "description": "WARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nA task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.\nIt is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.\nThe get task information API will continue to list these cancelled tasks until they complete.\nThe cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.\n\nTo troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.\nYou can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.\n ##Required 
authorization\n* Cluster privileges: `manage`", "operationId": "tasks-cancel-1", "parameters": [ { @@ -43093,7 +43149,7 @@ "tasks" ], "summary": "Get task information", - "description": "Get information about a task currently running in the cluster.\n\nWARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nIf the task identifier is not found, a 404 response code indicates that there are no resources that match the request.", + "description": "Get information about a task currently running in the cluster.\n\nWARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nIf the task identifier is not found, a 404 response code indicates that there are no resources that match the request.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "tasks-get", "parameters": [ { @@ -43185,7 +43241,7 @@ "tasks" ], "summary": "Get all tasks", - "description": "Get information about the tasks currently running on one or more nodes in the cluster.\n\nWARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\n**Identifying running tasks**\n\nThe `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information.\nThis enables you to track certain calls or associate certain tasks with the client that started them.\nFor example:\n\n```\ncurl -i -H \"X-Opaque-Id: 123456\" \"http://localhost:9200/_tasks?group_by=parents\"\n```\n\nThe API returns the following result:\n\n```\nHTTP/1.1 200 OK\nX-Opaque-Id: 123456\ncontent-type: application/json; charset=UTF-8\ncontent-length: 831\n\n{\n \"tasks\" : {\n \"u5lcZHqcQhu-rUoFaqDphA:45\" : {\n \"node\" : \"u5lcZHqcQhu-rUoFaqDphA\",\n \"id\" : 45,\n \"type\" : \"transport\",\n \"action\" : \"cluster:monitor/tasks/lists\",\n \"start_time_in_millis\" : 1513823752749,\n \"running_time_in_nanos\" : 293139,\n \"cancellable\" : false,\n \"headers\" : {\n \"X-Opaque-Id\" : \"123456\"\n },\n \"children\" : [\n {\n \"node\" : \"u5lcZHqcQhu-rUoFaqDphA\",\n \"id\" : 46,\n \"type\" : \"direct\",\n \"action\" : \"cluster:monitor/tasks/lists[n]\",\n \"start_time_in_millis\" : 1513823752750,\n \"running_time_in_nanos\" : 92133,\n \"cancellable\" : false,\n \"parent_task_id\" : \"u5lcZHqcQhu-rUoFaqDphA:45\",\n \"headers\" : {\n \"X-Opaque-Id\" : \"123456\"\n }\n }\n ]\n }\n }\n }\n```\nIn this example, `X-Opaque-Id: 123456` is the ID as a part of the response header.\nThe `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.\nThe `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.", + "description": "Get information about the tasks currently running on one or more nodes in the cluster.\n\nWARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\n**Identifying running tasks**\n\nThe `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information.\nThis enables you to track certain calls or associate certain tasks with the client that started them.\nFor example:\n\n```\ncurl 
-i -H \"X-Opaque-Id: 123456\" \"http://localhost:9200/_tasks?group_by=parents\"\n```\n\nThe API returns the following result:\n\n```\nHTTP/1.1 200 OK\nX-Opaque-Id: 123456\ncontent-type: application/json; charset=UTF-8\ncontent-length: 831\n\n{\n \"tasks\" : {\n \"u5lcZHqcQhu-rUoFaqDphA:45\" : {\n \"node\" : \"u5lcZHqcQhu-rUoFaqDphA\",\n \"id\" : 45,\n \"type\" : \"transport\",\n \"action\" : \"cluster:monitor/tasks/lists\",\n \"start_time_in_millis\" : 1513823752749,\n \"running_time_in_nanos\" : 293139,\n \"cancellable\" : false,\n \"headers\" : {\n \"X-Opaque-Id\" : \"123456\"\n },\n \"children\" : [\n {\n \"node\" : \"u5lcZHqcQhu-rUoFaqDphA\",\n \"id\" : 46,\n \"type\" : \"direct\",\n \"action\" : \"cluster:monitor/tasks/lists[n]\",\n \"start_time_in_millis\" : 1513823752750,\n \"running_time_in_nanos\" : 92133,\n \"cancellable\" : false,\n \"parent_task_id\" : \"u5lcZHqcQhu-rUoFaqDphA:45\",\n \"headers\" : {\n \"X-Opaque-Id\" : \"123456\"\n }\n }\n ]\n }\n }\n }\n```\nIn this example, `X-Opaque-Id: 123456` is the ID as a part of the response header.\nThe `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.\nThe `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "tasks-list", "parameters": [ { @@ -43360,7 +43416,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors", "parameters": [ { @@ -43424,7 +43480,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors-1", "parameters": [ { @@ -43490,7 +43546,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors-2", "parameters": [ { @@ -43551,7 +43607,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors-3", "parameters": [ { @@ -43614,7 +43670,7 @@ "text_structure" ], "summary": "Find the structure of a text field", - "description": "Find the structure of a text field in an Elasticsearch index.\n\nThis API provides a starting point for extracting further information from log messages already ingested into Elasticsearch.\nFor example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\n* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.", + "description": "Find the structure of a text field in an Elasticsearch index.\n\nThis API provides a starting point for extracting further information from log messages already ingested into Elasticsearch.\nFor example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\n* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.\n ##Required authorization\n* Cluster privileges: `monitor_text_structure`", "operationId": "text-structure-find-field-structure", "parameters": [ { @@ 
-43860,7 +43916,7 @@ "text_structure" ], "summary": "Find the structure of text messages", - "description": "Find the structure of a list of text messages.\nThe messages must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUse this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\nAppropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.", + "description": "Find the structure of a list of text messages.\nThe messages must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUse this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\nAppropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.\n ##Required authorization\n* Cluster privileges: `monitor_text_structure`", "operationId": "text-structure-find-message-structure", "parameters": [ { @@ -43918,7 +43974,7 @@ "text_structure" ], "summary": "Find the structure of text messages", - "description": "Find the structure of a list of text messages.\nThe messages must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUse this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text 
and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\nAppropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.", + "description": "Find the structure of a list of text messages.\nThe messages must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUse this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.\n\nThe response from the API contains:\n\n* Sample messages.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\nAppropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n\nIf the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.\nIt helps determine why the returned structure was chosen.\n ##Required authorization\n* Cluster privileges: `monitor_text_structure`", "operationId": "text-structure-find-message-structure-1", "parameters": [ { @@ -43978,7 +44034,7 @@ "text_structure" ], "summary": "Find the structure of a text file", - "description": "The text file must contain data that is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUnlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.\nIt must, however, be text; binary text formats are not currently supported.\nThe size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.\n\nThe response from the API contains:\n\n* A couple of messages from the beginning of the text.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\n* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.", + "description": "The text file must contain data that 
is suitable to be ingested into Elasticsearch.\n\nThis API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.\nUnlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.\nIt must, however, be text; binary text formats are not currently supported.\nThe size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.\n\nThe response from the API contains:\n\n* A couple of messages from the beginning of the text.\n* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.\n* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.\n* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.\n\nAll this information can be calculated by the structure finder with no guidance.\nHowever, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.\n ##Required authorization\n* Cluster privileges: `monitor_text_structure`", "operationId": "text-structure-find-structure", "parameters": [ { @@ -44354,7 +44410,7 @@ "transform" ], "summary": "Get transforms", - "description": "Get configuration information for transforms.", + "description": "Get configuration information for transforms.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "transform-get-transform", "parameters": [ { @@ -44385,7 +44441,7 @@ "transform" ], "summary": "Create a transform", - "description": "Creates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. 
If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.", + "description": "Creates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.\n ##Required authorization\n* Index privileges: `create_index`,`read`,`index`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-put-transform", "parameters": [ { @@ -44510,6 +44566,7 @@ "transform" ], "summary": "Delete a transform", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-delete-transform", "parameters": [ { @@ -44581,7 +44638,7 @@ "transform" ], "summary": "Get transforms", - "description": "Get configuration information for transforms.", + "description": "Get configuration information for transforms.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "transform-get-transform-1", "parameters": [ { @@ -44611,7 +44668,7 @@ "transform" ], "summary": "Get transform stats", - "description": "Get usage information for transforms.", + "description": "Get usage information for transforms.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `monitor_transform`", "operationId": "transform-get-transform-stats", "parameters": [ { @@ -44708,7 +44765,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. 
These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform", "parameters": [ { @@ -44739,7 +44796,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform-1", "parameters": [ { @@ -44772,7 +44829,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform-2", "parameters": [ { @@ -44800,7 +44857,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. 
These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform-3", "parameters": [ { @@ -44830,7 +44887,7 @@ "transform" ], "summary": "Reset a transform", - "description": "Before you can reset it, you must stop it; alternatively, use the `force` query parameter.\nIf the destination index was created by the transform, it is deleted.", + "description": "Before you can reset it, you must stop it; alternatively, use the `force` query parameter.\nIf the destination index was created by the transform, it is deleted.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-reset-transform", "parameters": [ { @@ -44892,7 +44949,7 @@ "transform" ], "summary": "Schedule a transform to start now", - "description": "Instantly run a transform to process data.\nIf you run this API, the transform will process the new data instantly,\nwithout waiting for the configured frequency interval. After the API is called,\nthe transform will be processed again at `now + frequency` unless the API\nis called again in the meantime.", + "description": "Instantly run a transform to process data.\nIf you run this API, the transform will process the new data instantly,\nwithout waiting for the configured frequency interval. After the API is called,\nthe transform will be processed again at `now + frequency` unless the API\nis called again in the meantime.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-schedule-now-transform", "parameters": [ { @@ -44944,7 +45001,7 @@ "transform" ], "summary": "Start a transform", - "description": "When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\nset to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping\ndefinitions for the destination index from the source indices and the transform aggregations. If fields in the\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\nin a pivot transform.\n\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\ntime of creation and uses those same roles. 
If those roles do not have the required privileges on the source and\ndestination indices, the transform fails when it attempts unauthorized operations.", + "description": "When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\nset to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping\ndefinitions for the destination index from the source indices and the transform aggregations. If fields in the\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\nin a pivot transform.\n\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\ntime of creation and uses those same roles. If those roles do not have the required privileges on the source and\ndestination indices, the transform fails when it attempts unauthorized operations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-start-transform", "parameters": [ { @@ -45006,7 +45063,7 @@ "transform" ], "summary": "Stop transforms", - "description": "Stops one or more transforms.", + "description": "Stops one or more transforms.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-stop-transform", "parameters": [ { @@ -45098,7 +45155,7 @@ "transform" ], "summary": "Update a transform", - "description": "Updates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.", + "description": "Updates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. 
When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.\n ##Required authorization\n* Index privileges: `read`,`index`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-update-transform", "parameters": [ { @@ -45271,7 +45328,7 @@ "transform" ], "summary": "Upgrade all transforms", - "description": "Transforms are compatible across minor versions and between supported major versions.\nHowever, over time, the format of transform configuration information may change.\nThis API identifies transforms that have a legacy configuration format and upgrades them to the latest version.\nIt also cleans up the internal data structures that store the transform state and checkpoints.\nThe upgrade does not affect the source and destination indices.\nThe upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.\n\nIf a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.\nResolve the issue then re-run the process again.\nA summary is returned when the upgrade is finished.\n\nTo ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster.\nYou may want to perform a recent cluster backup prior to the upgrade.", + "description": "Transforms are compatible across minor versions and between supported major versions.\nHowever, over time, the format of transform configuration information may change.\nThis API identifies transforms that have a legacy configuration format and upgrades them to the latest version.\nIt also cleans up the internal data structures that store the transform state and checkpoints.\nThe upgrade does not affect the source and destination indices.\nThe upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.\n\nIf a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.\nResolve the issue then re-run the process again.\nA summary is returned when the upgrade is finished.\n\nTo ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster.\nYou may want to perform a recent cluster backup prior to the upgrade.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-upgrade-transforms", "parameters": [ { @@ -45341,7 +45398,7 @@ "document" ], "summary": "Update a document", - "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be 
reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).", + "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\n ##Required authorization\n* Index privileges: `write`", "operationId": "update", "parameters": [ { @@ -45626,7 +45683,7 @@ "document" ], "summary": "Update documents", - "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by 
query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", + "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the 
difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.\n ##Required authorization\n* Index privileges: `read`,`write`", "operationId": "update-by-query", "parameters": [ { @@ -46152,7 +46209,7 @@ "watcher" ], "summary": "Acknowledge a watch", - "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).", + "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-ack-watch", "parameters": [ { @@ -46177,7 +46234,7 @@ "watcher" ], "summary": "Acknowledge a watch", - "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).", + "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an 
action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-ack-watch-1", "parameters": [ { @@ -46204,7 +46261,7 @@ "watcher" ], "summary": "Acknowledge a watch", - "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).", + "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-ack-watch-2", "parameters": [ { @@ -46232,7 +46289,7 @@ "watcher" ], "summary": "Acknowledge a watch", - "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).", + "description": "Acknowledging a watch enables you to manually throttle the execution of the watch's actions.\n\nThe acknowledgement state of an action is stored in the `status.actions..ack.state` structure.\n\nIMPORTANT: If the specified watch is currently being executed, this API will return an error\nThe reason for this behavior is to prevent overwriting the watch status from a watch execution.\n\nAcknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.\nThis happens when the condition of the watch is not met (the condition evaluates to false).\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-ack-watch-3", "parameters": [ { @@ -46262,7 +46319,7 @@ 
"watcher" ], "summary": "Activate a watch", - "description": "A watch can be either active or inactive.", + "description": "A watch can be either active or inactive.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "externalDocs": { "url": "https://www.elastic.co/docs/explore-analyze/alerts-cases/watcher/how-watcher-works" }, @@ -46284,7 +46341,7 @@ "watcher" ], "summary": "Activate a watch", - "description": "A watch can be either active or inactive.", + "description": "A watch can be either active or inactive.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "externalDocs": { "url": "https://www.elastic.co/docs/explore-analyze/alerts-cases/watcher/how-watcher-works" }, @@ -46308,7 +46365,7 @@ "watcher" ], "summary": "Deactivate a watch", - "description": "A watch can be either active or inactive.", + "description": "A watch can be either active or inactive.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "externalDocs": { "url": "https://www.elastic.co/docs/explore-analyze/alerts-cases/watcher/how-watcher-works" }, @@ -46330,7 +46387,7 @@ "watcher" ], "summary": "Deactivate a watch", - "description": "A watch can be either active or inactive.", + "description": "A watch can be either active or inactive.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "externalDocs": { "url": "https://www.elastic.co/docs/explore-analyze/alerts-cases/watcher/how-watcher-works" }, @@ -46354,6 +46411,7 @@ "watcher" ], "summary": "Get a watch", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_watcher`", "operationId": "watcher-get-watch", "parameters": [ { @@ -46426,7 +46484,7 @@ "watcher" ], "summary": "Create or update a watch", - "description": "When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.\nTypically for the `schedule` trigger, the scheduler is the trigger engine.\n\nIMPORTANT: You must use Kibana or this API to create a watch.\nDo not add a watch directly to the `.watches` index by using the Elasticsearch index API.\nIf Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.\n\nWhen you add a watch you can also define its initial active state by setting the *active* parameter.\n\nWhen Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.\nIf the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.", + "description": "When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.\nTypically for the `schedule` trigger, the scheduler is the trigger engine.\n\nIMPORTANT: You must use Kibana or this API to create a watch.\nDo not add a watch directly to the `.watches` index by using the Elasticsearch index API.\nIf Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.\n\nWhen you add a watch you can also define its initial active state by setting the *active* parameter.\n\nWhen Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.\nIf the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.\n 
##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-put-watch", "parameters": [ { @@ -46466,7 +46524,7 @@ "watcher" ], "summary": "Create or update a watch", - "description": "When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.\nTypically for the `schedule` trigger, the scheduler is the trigger engine.\n\nIMPORTANT: You must use Kibana or this API to create a watch.\nDo not add a watch directly to the `.watches` index by using the Elasticsearch index API.\nIf Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.\n\nWhen you add a watch you can also define its initial active state by setting the *active* parameter.\n\nWhen Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.\nIf the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.", + "description": "When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.\nTypically for the `schedule` trigger, the scheduler is the trigger engine.\n\nIMPORTANT: You must use Kibana or this API to create a watch.\nDo not add a watch directly to the `.watches` index by using the Elasticsearch index API.\nIf Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.\n\nWhen you add a watch you can also define its initial active state by setting the *active* parameter.\n\nWhen Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.\nIf the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-put-watch-1", "parameters": [ { @@ -46506,7 +46564,7 @@ "watcher" ], "summary": "Delete a watch", - "description": "When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.\n\nDeleting a watch does not delete any watch execution records related to this watch from the watch history.\n\nIMPORTANT: Deleting a watch must be done by using only this API.\nDo not delete the watch directly from the `.watches` index using the Elasticsearch delete document API\nWhen Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.", + "description": "When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.\n\nDeleting a watch does not delete any watch execution records related to this watch from the watch history.\n\nIMPORTANT: Deleting a watch must be done by using only this API.\nDo not delete the watch directly from the `.watches` index using the Elasticsearch delete document API\nWhen Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-delete-watch", "parameters": [ { @@ -46570,7 +46628,7 @@ "watcher" ], "summary": "Run a watch", - "description": "This API can be used to force execution of the 
watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.", + "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-execute-watch", "parameters": [ { @@ -46601,7 +46659,7 @@ "watcher" ], "summary": "Run a watch", - "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a 
base, instead of the information who stored the watch.", + "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-execute-watch-1", "parameters": [ { @@ -46634,7 +46692,7 @@ "watcher" ], "summary": "Run a watch", - "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.", + "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during 
execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-execute-watch-2", "parameters": [ { @@ -46662,7 +46720,7 @@ "watcher" ], "summary": "Run a watch", - "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.", + "description": "This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.\n\nFor testing and debugging purposes, you also have fine-grained control on how the watch runs.\nYou can run the watch without running all of its actions or alternatively by simulating them.\nYou can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.\n\nYou can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.\nThis serves as great tool for testing and debugging your watches prior to adding them to Watcher.\n\nWhen Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.\nIf your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.\n\nWhen using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-execute-watch-3", "parameters": [ { @@ -46739,7 +46797,7 @@ "watcher" ], "summary": "Update Watcher index settings", - "description": "Update settings for the Watcher internal index (`.watches`).\nOnly a subset of settings can be modified.\nThis includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`,\n`index.routing.allocation.include.*` and `index.routing.allocation.require.*`.\nModification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the\nWatcher shards must always be in the `data_content` tier.", + "description": "Update settings for the Watcher internal index (`.watches`).\nOnly a subset of settings can be modified.\nThis includes 
`index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`,\n`index.routing.allocation.include.*` and `index.routing.allocation.require.*`.\nModification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the\nWatcher shards must always be in the `data_content` tier.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-update-settings", "parameters": [ { @@ -46821,7 +46879,7 @@ "watcher" ], "summary": "Query watches", - "description": "Get all registered watches in a paginated manner and optionally filter watches by a query.\n\nNote that only the `_id` and `metadata.*` fields are queryable or sortable.", + "description": "Get all registered watches in a paginated manner and optionally filter watches by a query.\n\nNote that only the `_id` and `metadata.*` fields are queryable or sortable.\n ##Required authorization\n* Cluster privileges: `monitor_watcher`", "operationId": "watcher-query-watches", "requestBody": { "$ref": "#/components/requestBodies/watcher.query_watches" @@ -46844,7 +46902,7 @@ "watcher" ], "summary": "Query watches", - "description": "Get all registered watches in a paginated manner and optionally filter watches by a query.\n\nNote that only the `_id` and `metadata.*` fields are queryable or sortable.", + "description": "Get all registered watches in a paginated manner and optionally filter watches by a query.\n\nNote that only the `_id` and `metadata.*` fields are queryable or sortable.\n ##Required authorization\n* Cluster privileges: `monitor_watcher`", "operationId": "watcher-query-watches-1", "requestBody": { "$ref": "#/components/requestBodies/watcher.query_watches" @@ -46869,7 +46927,7 @@ "watcher" ], "summary": "Start the watch service", - "description": "Start the Watcher service if it is not already running.", + "description": "Start the Watcher service if it is not already running.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-start", "parameters": [ { @@ -46916,7 +46974,7 @@ "watcher" ], "summary": "Get Watcher statistics", - "description": "This API always returns basic metrics.\nYou retrieve more metrics by using the metric parameter.", + "description": "This API always returns basic metrics.\nYou retrieve more metrics by using the metric parameter.\n ##Required authorization\n* Cluster privileges: `monitor_watcher`", "operationId": "watcher-stats", "parameters": [ { @@ -46946,7 +47004,7 @@ "watcher" ], "summary": "Get Watcher statistics", - "description": "This API always returns basic metrics.\nYou retrieve more metrics by using the metric parameter.", + "description": "This API always returns basic metrics.\nYou retrieve more metrics by using the metric parameter.\n ##Required authorization\n* Cluster privileges: `monitor_watcher`", "operationId": "watcher-stats-1", "parameters": [ { @@ -46979,7 +47037,7 @@ "watcher" ], "summary": "Stop the watch service", - "description": "Stop the Watcher service if it is running.", + "description": "Stop the Watcher service if it is running.\n ##Required authorization\n* Cluster privileges: `manage_watcher`", "operationId": "watcher-stop", "parameters": [ { @@ -47026,7 +47084,7 @@ "xpack" ], "summary": "Get information", - "description": "The information provided by the API includes:\n\n* Build information including the build number and timestamp.\n* License information about the currently installed license.\n* Feature information for the features that are 
currently enabled and available under the current license.", + "description": "The information provided by the API includes:\n\n* Build information including the build number and timestamp.\n* License information about the currently installed license.\n* Feature information for the features that are currently enabled and available under the current license.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "xpack-info", "parameters": [ { @@ -47116,7 +47174,7 @@ "xpack" ], "summary": "Get usage information", - "description": "Get information about the features that are currently enabled and available under the current license.\nThe API also provides some usage statistics.", + "description": "Get information about the features that are currently enabled and available under the current license.\nThe API also provides some usage statistics.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "xpack-usage", "parameters": [ { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 7b45b69d1e..bc25f952ed 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -127,7 +127,7 @@ "search" ], "summary": "Get the async search status", - "description": "Get the status of a previously submitted async search request given its identifier, without retrieving search results.\nIf the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:\n\n* The user or API key that submitted the original async search request.\n* Users that have the `monitor` cluster privilege or greater privileges.", + "description": "Get the status of a previously submitted async search request given its identifier, without retrieving search results.\nIf the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:\n\n* The user or API key that submitted the original async search request.\n* Users that have the `monitor` cluster privilege or greater privileges.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "async-search-status", "parameters": [ { @@ -765,7 +765,7 @@ "cat" ], "summary": "Get aliases", - "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "cat-aliases", "parameters": [ { @@ -801,7 +801,7 @@ "cat" ], "summary": "Get aliases", - "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. 
For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "cat-aliases-1", "parameters": [ { @@ -840,7 +840,7 @@ "cat" ], "summary": "Get component templates", - "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-component-templates", "parameters": [ { @@ -876,7 +876,7 @@ "cat" ], "summary": "Get component templates", - "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "cat-component-templates-1", "parameters": [ { @@ -915,7 +915,7 @@ "cat" ], "summary": "Get a document count", - "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the count API.\n ##Required authorization\n* Index privileges: `read`", "operationId": "cat-count", "parameters": [ { @@ -945,7 +945,7 @@ "cat" ], "summary": "Get a document count", - "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.\n ##Required authorization\n* Index privileges: `read`", "operationId": "cat-count-1", "parameters": [ { @@ -1001,7 +1001,7 @@ "cat" ], "summary": "Get index information", - "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-indices", "parameters": [ { @@ -1052,7 +1052,7 @@ "cat" ], "summary": "Get index information", - "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. 
As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.\n ##Required authorization\n* Index privileges: `monitor`* Cluster privileges: `monitor`", "operationId": "cat-indices-1", "parameters": [ { @@ -1106,7 +1106,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-data-frame-analytics", "parameters": [ { @@ -1145,7 +1145,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-data-frame-analytics-1", "parameters": [ { @@ -1187,7 +1187,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-datafeeds", "parameters": [ { @@ -1223,7 +1223,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-datafeeds-1", "parameters": [ { @@ -1262,7 +1262,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-jobs", "parameters": [ { @@ -1301,7 +1301,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. 
They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-jobs-1", "parameters": [ { @@ -1343,7 +1343,7 @@ "cat" ], "summary": "Get trained models", - "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-trained-models", "parameters": [ { @@ -1388,7 +1388,7 @@ "cat" ], "summary": "Get trained models", - "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "cat-ml-trained-models-1", "parameters": [ { @@ -1436,7 +1436,7 @@ "cat" ], "summary": "Get transform information", - "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "cat-transforms", "parameters": [ { @@ -1478,7 +1478,7 @@ "cat" ], "summary": "Get transform information", - "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get transform statistics API.", + "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "cat-transforms-1", "parameters": [ { @@ -1523,7 +1523,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -1560,7 +1560,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -1625,7 +1625,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -1665,7 +1665,7 @@ "search" ], "summary": "Run a scrolling search", - "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. 
If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.", + "description": "IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. 
Subsequent indexing or document changes only affect later search and scroll requests.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/paginate-search-results#scroll-search-results" }, @@ -1810,7 +1810,7 @@ "indices" ], "summary": "Get component templates", - "description": "Get information about component templates.", + "description": "Get information about component templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-get-component-template-1", "parameters": [ { @@ -1841,7 +1841,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-put-component-template", "parameters": [ { @@ -1875,7 +1875,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of 
multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-put-component-template-1", "parameters": [ { @@ -1909,7 +1909,7 @@ "indices" ], "summary": "Delete component templates", - "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-delete-component-template", "parameters": [ { @@ -2015,7 +2015,7 @@ "indices" ], "summary": "Get component templates", - "description": "Get information about component templates.", + "description": "Get information about component templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "cluster-get-component-template", "parameters": [ { @@ -3739,7 +3739,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. 
When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count-1", "parameters": [ { @@ -3806,7 +3806,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count", "parameters": [ { @@ -3875,7 +3875,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. 
When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count-3", "parameters": [ { @@ -3945,7 +3945,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Get the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.\n ##Required authorization\n* Index privileges: `read`", "operationId": "count-2", "parameters": [ { @@ -4017,7 +4017,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n ##Required authorization\n* Index privileges: `create`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -4090,7 +4090,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n ##Required authorization\n* Index privileges: `create`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -4165,7 +4165,7 @@ "document" ], "summary": "Get a document by its ID", - "description": "Get a document and its source or stored fields from an index.\n\nBy default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).\nIn the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.\nTo turn off realtime behavior, set the `realtime` parameter to false.\n\n**Source filtering**\n\nBy default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.\nYou can turn off `_source` retrieval by using the `_source` parameter:\n\n```\nGET my-index-000001/_doc/0?_source=false\n```\n\nIf you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.\nThis can be helpful with large documents where partial retrieval can save on network overhead\nBoth parameters take a comma separated list of fields or wildcard expressions.\nFor example:\n\n```\nGET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities\n```\n\nIf you only want to specify includes, you can use a shorter notation:\n\n```\nGET my-index-000001/_doc/0?_source=*.id\n```\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to retrieve a document.\nFor example:\n\n```\nGET my-index-000001/_doc/2?routing=user1\n```\n\nThis request gets the document with ID 2, but it is routed based on the user.\nThe document is not fetched if the correct routing is not specified.\n\n**Distributed**\n\nThe GET operation is hashed into a specific shard ID.\nIt is then redirected to one of the replicas within that shard ID and returns the result.\nThe replicas are the primary shard and its replicas within that shard ID group.\nThis means that the more replicas you have, the better your GET scaling will be.\n\n**Versioning support**\n\nYou can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.\n\nInternally, Elasticsearch 
has marked the old document as deleted and added an entirely new document.\nThe old version of the document doesn't disappear immediately, although you won't be able to access it.\nElasticsearch cleans up deleted documents in the background as you continue to index more data.", + "description": "Get a document and its source or stored fields from an index.\n\nBy default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).\nIn the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.\nTo turn off realtime behavior, set the `realtime` parameter to false.\n\n**Source filtering**\n\nBy default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.\nYou can turn off `_source` retrieval by using the `_source` parameter:\n\n```\nGET my-index-000001/_doc/0?_source=false\n```\n\nIf you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.\nThis can be helpful with large documents where partial retrieval can save on network overhead\nBoth parameters take a comma separated list of fields or wildcard expressions.\nFor example:\n\n```\nGET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities\n```\n\nIf you only want to specify includes, you can use a shorter notation:\n\n```\nGET my-index-000001/_doc/0?_source=*.id\n```\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to retrieve a document.\nFor example:\n\n```\nGET my-index-000001/_doc/2?routing=user1\n```\n\nThis request gets the document with ID 2, but it is routed based on the user.\nThe document is not fetched if the correct routing is not specified.\n\n**Distributed**\n\nThe GET operation is hashed into a specific shard ID.\nIt is then redirected to one of the replicas within that shard ID and returns the result.\nThe replicas are the primary shard and its replicas within that shard ID group.\nThis means that the more replicas you have, the better your GET scaling will be.\n\n**Versioning support**\n\nYou can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.\n\nInternally, Elasticsearch has marked the old document as deleted and added an entirely new document.\nThe old version of the document doesn't disappear immediately, although you won't be able to access it.\nElasticsearch cleans up deleted documents in the background as you continue to index more data.\n ##Required authorization\n* Index privileges: `read`", "operationId": "get", "parameters": [ { @@ -4333,7 +4333,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, 
`index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be 
configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be 
set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -4403,7 +4403,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -4473,7 +4473,7 @@ "document" ], "summary": "Delete a document", - "description": "Remove a JSON document from the specified index.\n\nNOTE: You cannot send deletion requests directly to a data stream.\nTo delete a document in a data stream, you must target the backing index containing the document.\n\n**Optimistic concurrency control**\n\nDelete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Versioning**\n\nEach document indexed is versioned.\nWhen deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime.\nEvery write operation run on a document, deletes included, causes its version to be incremented.\nThe version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations.\nThe length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting.\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to delete a document.\n\nIf the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request.\n\nFor example:\n\n```\nDELETE /my-index-000001/_doc/1?routing=shard-1\n```\n\nThis request deletes the document with ID 1, but it is routed based on the user.\nThe document is not deleted if the correct routing is not specified.\n\n**Distributed**\n\nThe delete operation gets hashed into a specific shard ID.\nIt then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.", + "description": "Remove a JSON document from the specified index.\n\nNOTE: You cannot send deletion requests directly to a data stream.\nTo delete a document in a data stream, you must target the backing index containing the document.\n\n**Optimistic concurrency control**\n\nDelete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will 
result in a `VersionConflictException` and a status code of `409`.\n\n**Versioning**\n\nEach document indexed is versioned.\nWhen deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime.\nEvery write operation run on a document, deletes included, causes its version to be incremented.\nThe version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations.\nThe length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting.\n\n**Routing**\n\nIf routing is used during indexing, the routing value also needs to be specified to delete a document.\n\nIf the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request.\n\nFor example:\n\n```\nDELETE /my-index-000001/_doc/1?routing=shard-1\n```\n\nThis request deletes the document with ID 1, but it is routed based on the user.\nThe document is not deleted if the correct routing is not specified.\n\n**Distributed**\n\nThe delete operation gets hashed into a specific shard ID.\nIt then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.\n ##Required authorization\n* Index privileges: `delete`", "operationId": "delete", "parameters": [ { @@ -4753,7 +4753,7 @@ "document" ], "summary": "Delete documents", - "description": "Deletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick, they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal 
number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.", + "description": "Deletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick, they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will 
choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.\n ##Required authorization\n* Index privileges: `read`,`delete`", "operationId": "delete-by-query", "parameters": [ { @@ -5202,7 +5202,7 @@ "script" ], "summary": "Get a script or search template", - "description": "Retrieves a stored script or search template.", + "description": "Retrieves a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "get-script", "parameters": [ { @@ -5261,7 +5261,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -5301,7 +5301,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -5341,7 +5341,7 @@ "script" ], "summary": "Delete a script or search template", - "description": "Deletes a stored script or search template.", + "description": "Deletes a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "operationId": "delete-script", "parameters": [ { @@ -5887,7 +5887,7 @@ "esql" ], "summary": "Get a specific running ES|QL query information", - "description": "Returns an object extended information about a running ES|QL query.", + "description": "Returns an object extended information about a running ES|QL query.\n ##Required authorization\n* Cluster privileges: `monitor_esql`", "operationId": "esql-get-query", "parameters": [ { @@ -5958,7 +5958,7 @@ "esql" ], "summary": "Get running ES|QL queries information", - "description": "Returns an object containing IDs and other information about the running ES|QL queries.", + "description": "Returns an object containing IDs and other information about the running ES|QL queries.\n ##Required authorization\n* Cluster privileges: `monitor_esql`", "operationId": "esql-list-queries", "responses": { "200": { @@ -6126,7 +6126,7 @@ "document" ], "summary": "Get a document's source", - "description": "Get the source of a document.\nFor example:\n\n```\nGET my-index-000001/_source/1\n```\n\nYou can use the source filtering parameters to control which parts of the `_source` are returned:\n\n```\nGET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities\n```", + "description": "Get the source of a document.\nFor example:\n\n```\nGET my-index-000001/_source/1\n```\n\nYou can use the source filtering parameters to control which parts of the `_source` are returned:\n\n```\nGET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-source-field" }, @@ 
-6274,7 +6274,7 @@ "document" ], "summary": "Check for a document source", - "description": "Check whether a document source exists in an index.\nFor example:\n\n```\nHEAD my-index-000001/_source/1\n```\n\nA document's source is not available if it is disabled in the mapping.", + "description": "Check whether a document source exists in an index.\nFor example:\n\n```\nHEAD my-index-000001/_source/1\n```\n\nA document's source is not available if it is disabled in the mapping.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-source-field" }, @@ -6410,7 +6410,7 @@ "search" ], "summary": "Explain a document match result", - "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", + "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.\n ##Required authorization\n* Index privileges: `read`", "operationId": "explain", "parameters": [ { @@ -6477,7 +6477,7 @@ "search" ], "summary": "Explain a document match result", - "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", + "description": "Get information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.\n ##Required authorization\n* Index privileges: `read`", "operationId": "explain-1", "parameters": [ { @@ -6546,7 +6546,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps", "parameters": [ { @@ -6595,7 +6595,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps-1", 
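For orientation, a minimal Console-style sketch of the kind of request the `field-caps` descriptions above document; the index name and field list are placeholders, not values taken from the spec output:

```
GET /my-index-000001/_field_caps?fields=user.id,message
```

The response reports capabilities per field, so a runtime field of type `keyword` shows up the same way as a mapped `keyword` field, as the description notes.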
"parameters": [ { @@ -6646,7 +6646,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps-2", "parameters": [ { @@ -6698,7 +6698,7 @@ "search" ], "summary": "Get the field capabilities", - "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "description": "Get information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`read`", "operationId": "field-caps-3", "parameters": [ { @@ -6828,7 +6828,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. 
Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this 
behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version 
number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
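As a worked illustration of the automatic index creation behaviour this operation describes, a Console-style sketch of the `action.auto_create_index` cluster setting; the pattern list is an assumed example, not taken from the schema:

```
PUT _cluster/settings
{
  "persistent": {
    "action.auto_create_index": "my-index-000001,index10,-index1*,+ind*"
  }
}
```

Patterns prefixed with `+` are allowed and patterns prefixed with `-` are blocked, matching the allow/block semantics spelled out in the description.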
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/data-streams" }, @@ -7025,7 +7025,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -7056,7 +7056,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -7089,7 +7089,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe 
`index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -7123,7 +7123,7 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.\n ##Required authorization\n* Index privileges: `index`", "externalDocs": { "url": "https://www.elastic.co/docs/manage-data/data-store/text-analysis" }, @@ -7159,7 +7159,7 @@ "indices" ], "summary": "Get index information", - "description": "Get information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", + "description": "Get information about one or more indices. 
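To make the token-limit discussion in the `_analyze` descriptions above concrete, a Console-style sketch of an analyze request with no target index, so the fixed `10000` token limit applies; the analyzer and sample text are arbitrary:

```
GET /_analyze
{
  "analyzer": "standard",
  "text": "Quick Brown Foxes jumped over the lazy dog"
}
```

Sending the same body to `/my-index-000001/_analyze` would instead honour that index's `index.analyze.max_token_count` setting.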
For data streams, the API returns information about the\nstream’s backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`,`manage`", "operationId": "indices-get", "parameters": [ { @@ -7276,7 +7276,7 @@ "indices" ], "summary": "Create an index", - "description": "You can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", + "description": "You can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all 
subsequent write operations.\n ##Required authorization\n* Index privileges: `create_index`,`manage`", "operationId": "indices-create", "parameters": [ { @@ -7403,7 +7403,7 @@ "indices" ], "summary": "Delete indices", - "description": "Deleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.", + "description": "Deleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.\n ##Required authorization\n* Index privileges: `delete_index`", "operationId": "indices-delete", "parameters": [ { @@ -7579,7 +7579,7 @@ "data stream" ], "summary": "Get data streams", - "description": "Get information about one or more data streams.", + "description": "Get information about one or more data streams.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-data-stream-1", "parameters": [ { @@ -7610,7 +7610,7 @@ "data stream" ], "summary": "Create a data stream", - "description": "You must have a matching index template with data stream enabled.", + "description": "You must have a matching index template with data stream enabled.\n ##Required authorization\n* Index privileges: `create_index`", "operationId": "indices-create-data-stream", "parameters": [ { @@ -7664,7 +7664,7 @@ "data stream" ], "summary": "Delete data streams", - "description": "Deletes one or more data streams and their backing indices.", + "description": "Deletes one or more data streams and their backing indices.\n ##Required authorization\n* Index privileges: `delete_index`", "operationId": "indices-delete-data-stream", "parameters": [ { @@ -7720,7 +7720,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias-2", "parameters": [ { @@ -7828,7 +7828,7 @@ "indices" ], "summary": "Delete an alias", - "description": "Removes a data stream or index from an alias.", + "description": "Removes a data stream or index from an alias.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-delete-alias", "parameters": [ { @@ -7966,7 +7966,7 @@ "indices" ], "summary": "Delete an alias", - "description": "Removes a data stream or index from an alias.", + "description": "Removes a data stream or index from an alias.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-delete-alias-1", "parameters": [ { @@ -7996,7 +7996,7 @@ "indices" ], "summary": "Get index templates", - "description": "Get information about one or more index templates.", + "description": "Get information about one or more index templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-get-index-template-1", "parameters": [ 
{ @@ -8027,7 +8027,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing 
aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-put-index-template", "parameters": [ { @@ -8064,7 +8064,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", + 
"description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-put-index-template-1", "parameters": [ { @@ -8101,7 +8101,7 @@ "indices" ], "summary": "Delete an index template", - "description": "The provided may contain multiple template names separated by a comma. If multiple template\nnames are specified then there is no wildcard support and the provided names should match completely with\nexisting templates.", + "description": "The provided may contain multiple template names separated by a comma. 
If multiple template\nnames are specified then there is no wildcard support and the provided names should match completely with\nexisting templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-delete-index-template", "parameters": [ { @@ -8217,7 +8217,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias-1", "parameters": [ { @@ -8366,7 +8366,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias", "parameters": [ { @@ -8396,7 +8396,7 @@ "indices" ], "summary": "Get aliases", - "description": "Retrieves information for one or more data stream or index aliases.", + "description": "Retrieves information for one or more data stream or index aliases.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-alias-3", "parameters": [ { @@ -8626,7 +8626,7 @@ "data stream" ], "summary": "Get data streams", - "description": "Get information about one or more data streams.", + "description": "Get information about one or more data streams.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-data-stream", "parameters": [ { @@ -8801,7 +8801,7 @@ "data stream" ], "summary": "Get data stream settings", - "description": "Get setting information for one or more data streams.", + "description": "Get setting information for one or more data streams.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-data-stream-settings", "parameters": [ { @@ -8863,7 +8863,7 @@ "data stream" ], "summary": "Update data stream settings", - "description": "This API can be used to override settings on specific data streams. These overrides will take precedence over what\nis specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,\nonly certain settings are allowed. If possible, the setting change is applied to all\nbacking indices. Otherwise, it will be applied when the data stream is next rolled over.", + "description": "This API can be used to override settings on specific data streams. These overrides will take precedence over what\nis specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,\nonly certain settings are allowed. If possible, the setting change is applied to all\nbacking indices. 
Otherwise, it will be applied when the data stream is next rolled over.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-put-data-stream-settings", "parameters": [ { @@ -8974,7 +8974,7 @@ "indices" ], "summary": "Get index templates", - "description": "Get information about one or more index templates.", + "description": "Get information about one or more index templates.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-get-index-template", "parameters": [ { @@ -9004,7 +9004,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-mapping", "parameters": [ { @@ -9037,7 +9037,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-mapping-1", "parameters": [ { @@ -9071,7 +9071,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable 
multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-parameters" }, @@ -9120,7 +9120,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the 
fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/mapping-reference/mapping-parameters" }, @@ -9171,7 +9171,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings", "parameters": [ { @@ -9208,7 +9208,7 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. 
For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/index-settings/" }, @@ -9262,7 +9262,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings-1", "parameters": [ { @@ -9302,7 +9302,7 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of 
per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\n There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:\n\n```\n{\n \"number_of_replicas\": 1\n}\n```\n\nOr you can use an `index` setting object:\n```\n{\n \"index\": {\n \"number_of_replicas\": 1\n }\n}\n```\n\nOr you can use dot annotation:\n```\n{\n \"index.number_of_replicas\": 1\n}\n```\n\nOr you can embed any of the aforementioned options in a `settings` object. 
For example:\n\n```\n{\n \"settings\": {\n \"index\": {\n \"number_of_replicas\": 1\n }\n }\n}\n```\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.\n ##Required authorization\n* Index privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/index-settings/" }, @@ -9359,7 +9359,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings-2", "parameters": [ { @@ -9404,7 +9404,7 @@ "indices" ], "summary": "Get index settings", - "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-get-settings-3", "parameters": [ { @@ -9446,7 +9446,7 @@ "data stream" ], "summary": "Convert an index alias to a data stream", - "description": "Converts an index alias to a data stream.\nYou must have a matching index template that is data stream enabled.\nThe alias must meet the following criteria:\nThe alias must have a write index;\nAll indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type;\nThe alias must not have any filters;\nThe alias must not use custom routing.\nIf successful, the request removes the alias and creates a data stream with the same name.\nThe indices for the alias become hidden backing indices for the stream.\nThe write index for the alias becomes the write index for the stream.", + "description": "Converts an index alias to a data stream.\nYou must have a matching index template that is data stream enabled.\nThe alias must meet the following criteria:\nThe alias must have a write index;\nAll indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type;\nThe alias must not have any filters;\nThe alias must not use custom routing.\nIf successful, the request removes the alias and creates a data stream with the same name.\nThe indices for the alias become hidden backing indices for the stream.\nThe write index for the alias becomes the write index for the stream.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-migrate-to-data-stream", "parameters": [ { @@ -9547,7 +9547,7 @@ "indices" ], "summary": "Refresh an 
index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh-1", "parameters": [ { @@ -9572,7 +9572,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only 
on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh", "parameters": [ { @@ -9599,7 +9599,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh-3", "parameters": [ { @@ -9627,7 +9627,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this 
default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.\n ##Required authorization\n* Index privileges: `maintenance`", "operationId": "indices-refresh-2", "parameters": [ { @@ -9657,7 +9657,7 @@ "indices" ], "summary": "Resolve indices", - "description": "Resolve the names and/or index patterns for indices, aliases, and data streams.\nMultiple patterns and remote clusters are supported.", + "description": "Resolve the names and/or index patterns for indices, aliases, and data streams.\nMultiple patterns and remote clusters are supported.\n ##Required authorization\n* Index privileges: `view_index_metadata`", "operationId": "indices-resolve-index", "parameters": [ { @@ -9760,7 +9760,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also `sets is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes 
the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also `sets is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-rollover", "parameters": [ { @@ -9805,7 +9805,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "TIP: It is recommended to use the index lifecycle rollover action to automate 
rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also `sets is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also `sets is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the 
new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.\n ##Required authorization\n* Index privileges: `manage`", "operationId": "indices-rollover-1", "parameters": [ { @@ -9853,7 +9853,7 @@ "indices" ], "summary": "Simulate an index", - "description": "Get the index configuration that would be applied to the specified index from an existing index template.", + "description": "Get the index configuration that would be applied to the specified index from an existing index template.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-simulate-index-template", "parameters": [ { @@ -9955,7 +9955,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Get the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-simulate-template", "parameters": [ { @@ -9994,7 +9994,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Get the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.\n ##Required authorization\n* Cluster privileges: `manage_index_templates`", "operationId": "indices-simulate-template-1", "parameters": [ { @@ -10529,7 +10529,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put", 
"parameters": [ { @@ -10551,7 +10551,7 @@ "inference" ], "summary": "Perform inference on the service", - "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-inference", "parameters": [ { @@ -10623,7 +10623,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-1", "parameters": [ { @@ -10648,7 +10648,7 @@ "inference" ], "summary": "Perform inference on the service", - "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "description": "This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\nFor details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. 
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-inference-1", "parameters": [ { @@ -10720,7 +10720,7 @@ "inference" ], "summary": "Create an AlibabaCloud AI Search inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.", + "description": "Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-alibabacloud", "parameters": [ { @@ -10822,7 +10822,7 @@ "inference" ], "summary": "Create an Amazon Bedrock inference endpoint", - "description": "Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.", + "description": "Creates an inference endpoint to perform an inference task with the `amazonbedrock` service.\n\n>info\n> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. 
If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-amazonbedrock", "parameters": [ { @@ -10914,7 +10914,7 @@ "inference" ], "summary": "Create an Anthropic inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `anthropic` service.", + "description": "Create an inference endpoint to perform an inference task with the `anthropic` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-anthropic", "parameters": [ { @@ -11000,7 +11000,7 @@ "inference" ], "summary": "Create an Azure AI studio inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `azureaistudio` service.", + "description": "Create an inference endpoint to perform an inference task with the `azureaistudio` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-azureaistudio", "parameters": [ { @@ -11092,7 +11092,7 @@ "inference" ], "summary": "Create an Azure OpenAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).", + "description": "Create an inference endpoint to perform an inference task with the `azureopenai` service.\n\nThe list of chat completion models that you can choose from in your Azure OpenAI deployment include:\n\n* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)\n* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)\n\nThe list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-azureopenai", "parameters": [ { @@ -11184,7 +11184,7 @@ "inference" ], "summary": "Create a Cohere inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `cohere` service.", + "description": "Create an inference endpoint to perform an inference task with the `cohere` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-cohere", "parameters": [ { @@ -11276,7 +11276,7 @@ "inference" ], "summary": "Create an Elasticsearch inference endpoint", - 
"description": "Create an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an inference endpoint to perform an inference task with the `elasticsearch` service.\n\n> info\n> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings.\n\nIf you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-elasticsearch", "parameters": [ { @@ -11394,7 +11394,7 @@ "inference" ], "summary": "Create an ELSER inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. 
If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an inference endpoint to perform an inference task with the `elser` service.\nYou can also deploy ELSER by using the Elasticsearch inference integration.\n\n> info\n> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings.\n\nThe API request will automatically download and deploy the ELSER model if it isn't already downloaded.\n\n> info\n> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.\n\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-elser", "parameters": [ { @@ -11490,7 +11490,7 @@ "inference" ], "summary": "Create an Google AI Studio inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `googleaistudio` service.", + "description": "Create an inference endpoint to perform an inference task with the `googleaistudio` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-googleaistudio", "parameters": [ { @@ -11574,7 +11574,7 @@ "inference" ], "summary": "Create a Google Vertex AI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `googlevertexai` service.", + "description": "Create an inference endpoint to perform an inference task with the `googlevertexai` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-googlevertexai", "parameters": [ { @@ -11666,7 +11666,7 @@ "inference" ], "summary": "Create a Hugging Face inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* 
`multilingual-e5-base`\n* `multilingual-e5-small`", + "description": "Create an inference endpoint to perform an inference task with the `hugging_face` service.\n\nYou must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.\nSelect the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section.\nCreate the endpoint and copy the URL after the endpoint initialization has been finished.\n\nThe following models are recommended for the Hugging Face service:\n\n* `all-MiniLM-L6-v2`\n* `all-MiniLM-L12-v2`\n* `all-mpnet-base-v2`\n* `e5-base-v2`\n* `e5-small-v2`\n* `multilingual-e5-base`\n* `multilingual-e5-small`\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-hugging-face", "parameters": [ { @@ -11750,7 +11750,7 @@ "inference" ], "summary": "Create an JinaAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` models, refer to .\nTo review the available `text_embedding` models, refer to the .", + "description": "Create an inference endpoint to perform an inference task with the `jinaai` service.\n\nTo review the available `rerank` models, refer to .\nTo review the available `text_embedding` models, refer to the .\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-jinaai", "parameters": [ { @@ -11842,7 +11842,7 @@ "inference" ], "summary": "Create a Mistral inference endpoint", - "description": "Creates an inference endpoint to perform an inference task with the `mistral` service.", + "description": "Creates an inference endpoint to perform an inference task with the `mistral` service.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-mistral", "parameters": [ { @@ -11925,7 +11925,7 @@ "inference" ], "summary": "Create an OpenAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.", + "description": "Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-openai", "parameters": [ { @@ -12017,7 +12017,7 @@ "inference" ], "summary": "Create a VoyageAI inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `voyageai` service.\n\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.", + "description": "Create an inference endpoint to perform an inference task with the `voyageai` service.\n\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-voyageai", "parameters": [ { @@ -12109,7 +12109,7 @@ "inference" ], "summary": "Create a Watsonx inference endpoint", - "description": "Create an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud 
Databases API, or Terraform.", + "description": "Create an inference endpoint to perform an inference task with the `watsonxai` service.\nYou need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service.\nYou can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.\n ##Required authorization\n* Cluster privileges: `manage_inference`", "operationId": "inference-put-watsonx", "parameters": [ { @@ -12189,6 +12189,7 @@ "inference" ], "summary": "Perform rereanking inference on the service", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_inference`", "operationId": "inference-rerank", "parameters": [ { @@ -12482,7 +12483,7 @@ "info" ], "summary": "Get cluster info", - "description": "Get basic build, version, and cluster information.", + "description": "Get basic build, version, and cluster information.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "info", "responses": { "200": { @@ -12833,7 +12834,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate", "parameters": [ { @@ -12861,7 +12862,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate-1", "parameters": [ { @@ -12891,7 +12892,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate-2", "parameters": [ { @@ -12922,7 +12923,7 @@ "ingest" ], "summary": "Simulate a pipeline", - "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Run an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.\n ##Required authorization\n* Cluster privileges: `read_pipeline`", "operationId": "ingest-simulate-3", 
"parameters": [ { @@ -13020,7 +13021,7 @@ "logstash" ], "summary": "Get Logstash pipelines", - "description": "Get pipelines that are used for Logstash Central Management.", + "description": "Get pipelines that are used for Logstash Central Management.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -13048,7 +13049,7 @@ "logstash" ], "summary": "Create or update a Logstash pipeline", - "description": "Create a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.", + "description": "Create a pipeline that is used for Logstash Central Management.\nIf the specified pipeline exists, it is replaced.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -13104,7 +13105,7 @@ "logstash" ], "summary": "Delete a Logstash pipeline", - "description": "Delete a pipeline that is used for Logstash Central Management.\nIf the request succeeds, you receive an empty response with an appropriate status code.", + "description": "Delete a pipeline that is used for Logstash Central Management.\nIf the request succeeds, you receive an empty response with an appropriate status code.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -13139,7 +13140,7 @@ "logstash" ], "summary": "Get Logstash pipelines", - "description": "Get pipelines that are used for Logstash Central Management.", + "description": "Get pipelines that are used for Logstash Central Management.\n ##Required authorization\n* Cluster privileges: `manage_logstash_pipelines`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/logstash/logstash-centralized-pipeline-management" }, @@ -13164,7 +13165,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is 
returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget", "parameters": [ { @@ -13213,7 +13214,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget-1", "parameters": [ { @@ -13264,7 +13265,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` 
and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget-2", "parameters": [ { @@ -13316,7 +13317,7 @@ "document" ], "summary": "Get multiple documents", - "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "description": "Get multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to 
specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mget-3", "parameters": [ { @@ -13370,7 +13371,7 @@ "ml anomaly" ], "summary": "Close anomaly detection jobs", - "description": "A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.", + "description": "A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.\nWhen you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data.\nIf you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. 
This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request.\nWhen a datafeed that has a specified end date stops, it automatically closes its associated job.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-close-job", "parameters": [ { @@ -13472,6 +13473,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars-2", "parameters": [ { @@ -13499,6 +13501,7 @@ "ml anomaly" ], "summary": "Create a calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-calendar", "parameters": [ { @@ -13570,6 +13573,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars-3", "parameters": [ { @@ -13597,7 +13601,7 @@ "ml anomaly" ], "summary": "Delete a calendar", - "description": "Remove all scheduled events from a calendar, then delete it.", + "description": "Remove all scheduled events from a calendar, then delete it.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-calendar", "parameters": [ { @@ -13691,6 +13695,7 @@ "ml anomaly" ], "summary": "Add anomaly detection job to calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-calendar-job", "parameters": [ { @@ -13751,6 +13756,7 @@ "ml anomaly" ], "summary": "Delete anomaly jobs from a calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-calendar-job", "parameters": [ { @@ -13819,7 +13825,7 @@ "ml data frame" ], "summary": "Get data frame analytics job configuration info", - "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.", + "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics", "parameters": [ { @@ -13850,7 +13856,7 @@ "ml data frame" ], "summary": "Create a data frame analytics job", - "description": "This API creates a data frame analytics job that performs an analysis on the\nsource indices and stores the outcome in a destination index.\nBy default, the query used in the source configuration is `{\"match_all\": {}}`.\n\nIf the destination index does not exist, it is created automatically when you start the job.\n\nIf you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.", + "description": "This API creates a data frame analytics job that performs an analysis on the\nsource indices and stores the outcome in a destination index.\nBy default, the query used in the source configuration is `{\"match_all\": {}}`.\n\nIf the destination index does not exist, it is created automatically when you start the job.\n\nIf you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. 
It determines a value for each of the undefined parameters.\n ##Required authorization\n* Index privileges: `create_index`,`index`,`manage`,`read`,`view_index_metadata`* Cluster privileges: `manage_ml`", "operationId": "ml-put-data-frame-analytics", "parameters": [ { @@ -13990,6 +13996,7 @@ "ml data frame" ], "summary": "Delete a data frame analytics job", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-data-frame-analytics", "parameters": [ { @@ -14051,7 +14058,7 @@ "ml anomaly" ], "summary": "Get datafeeds configuration info", - "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeeds", "parameters": [ { @@ -14076,7 +14083,7 @@ "ml anomaly" ], "summary": "Create a datafeed", - "description": "Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\nYou can associate only one datafeed with each anomaly detection job.\nThe datafeed contains a query that runs at a defined interval (`frequency`).\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\nBy default, the datafeed uses the following query: `{\"match_all\": {\"boost\": 1}}`.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\nat the time of creation and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\ndirectly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.", + "description": "Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\nYou can associate only one datafeed with each anomaly detection job.\nThe datafeed contains a query that runs at a defined interval (`frequency`).\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\nBy default, the datafeed uses the following query: `{\"match_all\": {\"boost\": 1}}`.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\nat the time of creation and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\ndirectly to the `.ml-config` index. 
Do not give users `write` privileges on the `.ml-config` index.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-put-datafeed", "parameters": [ { @@ -14279,6 +14286,7 @@ "ml anomaly" ], "summary": "Delete a datafeed", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-datafeed", "parameters": [ { @@ -14330,7 +14338,7 @@ "ml anomaly" ], "summary": "Get filters", - "description": "You can get a single filter or all filters.", + "description": "You can get a single filter or all filters.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-get-filters-1", "parameters": [ { @@ -14355,7 +14363,7 @@ "ml anomaly" ], "summary": "Create a filter", - "description": "A filter contains a list of strings. It can be used by one or more anomaly detection jobs.\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.", + "description": "A filter contains a list of strings. It can be used by one or more anomaly detection jobs.\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-filter", "parameters": [ { @@ -14431,7 +14439,7 @@ "ml anomaly" ], "summary": "Delete a filter", - "description": "If an anomaly detection job references the filter, you cannot delete the\nfilter. You must update or delete the job before you can delete the filter.", + "description": "If an anomaly detection job references the filter, you cannot delete the\nfilter. You must update or delete the job before you can delete the filter.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-filter", "parameters": [ { @@ -14473,7 +14481,7 @@ "ml anomaly" ], "summary": "Get anomaly detection jobs configuration info", - "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. 
You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression.
You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-jobs", "parameters": [ { @@ -14498,7 +14506,7 @@ "ml anomaly" ], "summary": "Create an anomaly detection job", - "description": "If you include a `datafeed_config`, you must have read index privileges on the source index.\nIf you include a `datafeed_config` but do not provide a query, the datafeed uses `{\"match_all\": {\"boost\": 1}}`.", + "description": "If you include a `datafeed_config`, you must have read index privileges on the source index.\nIf you include a `datafeed_config` but do not provide a query, the datafeed uses `{\"match_all\": {\"boost\": 1}}`.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-put-job", "parameters": [ { @@ -14736,7 +14744,7 @@ "ml anomaly" ], "summary": "Delete an anomaly detection job", - "description": "All job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.", + "description": "All job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-job", "parameters": [ { @@ -14814,6 +14822,7 @@ "ml trained model" ], "summary": "Get trained model configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models", "parameters": [ { @@ -14853,7 +14862,7 @@ "ml trained model" ], "summary": "Create a trained model", - "description": "Enable you to supply a trained model that is not created by data frame analytics.", + "description": "Enable you to supply a trained model that is not created by data frame analytics.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model", "parameters": [ { @@ -14961,7 +14970,7 @@ "ml trained model" ], "summary": "Delete an unreferenced trained model", - "description": "The request deletes a trained inference model that is not referenced by an ingest pipeline.", + "description": "The request deletes a trained inference model that is not referenced by an ingest pipeline.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-trained-model", "parameters": [ { @@ -15023,7 +15032,7 @@ "ml trained model" ], "summary": "Create or update a trained model alias", - "description": "A trained model alias is a logical name used to reference a single trained\nmodel.\nYou can use aliases instead of trained model identifiers to make it easier to\nreference your models. For example, you can use aliases in inference\naggregations and processors.\nAn alias must be unique and refer to only a single trained model. 
However,\nyou can have multiple aliases for each trained model.\nIf you use this API to update an alias such that it references a different\ntrained model ID and the model uses a different type of data frame analytics,\nan error occurs. For example, this situation occurs if you have a trained\nmodel for regression analysis and a trained model for classification\nanalysis; you cannot reassign an alias from one type of trained model to\nanother.\nIf you use this API to update an alias and there are very few input fields in\ncommon between the old and new trained models for the model alias, the API\nreturns a warning.", + "description": "A trained model alias is a logical name used to reference a single trained\nmodel.\nYou can use aliases instead of trained model identifiers to make it easier to\nreference your models. For example, you can use aliases in inference\naggregations and processors.\nAn alias must be unique and refer to only a single trained model. However,\nyou can have multiple aliases for each trained model.\nIf you use this API to update an alias such that it references a different\ntrained model ID and the model uses a different type of data frame analytics,\nan error occurs. For example, this situation occurs if you have a trained\nmodel for regression analysis and a trained model for classification\nanalysis; you cannot reassign an alias from one type of trained model to\nanother.\nIf you use this API to update an alias and there are very few input fields in\ncommon between the old and new trained models for the model alias, the API\nreturns a warning.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model-alias", "parameters": [ { @@ -15078,7 +15087,7 @@ "ml trained model" ], "summary": "Delete a trained model alias", - "description": "This API deletes an existing model alias that refers to a trained model. If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.", + "description": "This API deletes an existing model alias that refers to a trained model. If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-delete-trained-model-alias", "parameters": [ { @@ -15131,7 +15140,7 @@ "ml anomaly" ], "summary": "Estimate job model memory usage", - "description": "Make an estimation of the memory usage for an anomaly detection job model.\nThe estimate is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.", + "description": "Make an estimation of the memory usage for an anomaly detection job model.\nThe estimate is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-estimate-model-memory", "requestBody": { "content": { @@ -15209,7 +15218,7 @@ "ml data frame" ], "summary": "Evaluate data frame analytics", - "description": "The API packages together commonly used evaluation metrics for various types\nof machine learning features. This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.", + "description": "The API packages together commonly used evaluation metrics for various types\nof machine learning features. 
This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.", + "description": "The API packages together commonly used evaluation metrics for various types\nof machine learning features.
This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-evaluate-data-frame", "requestBody": { "content": { @@ -15318,7 +15327,7 @@ "ml anomaly" ], "summary": "Force buffered data to be processed", - "description": "The flush jobs API is only applicable when sending data for analysis using\nthe post data API. Depending on the content of the buffer, then it might\nadditionally calculate new results. Both flush and close operations are\nsimilar, however the flush is more efficient if you are expecting to send\nmore data for analysis. When flushing, the job remains open and is available\nto continue analyzing data. A close operation additionally prunes and\npersists the model state to disk and the job must be opened again before\nanalyzing further data.", + "description": "The flush jobs API is only applicable when sending data for analysis using\nthe post data API. Depending on the content of the buffer, then it might\nadditionally calculate new results. Both flush and close operations are\nsimilar, however the flush is more efficient if you are expecting to send\nmore data for analysis. When flushing, the job remains open and is available\nto continue analyzing data. A close operation additionally prunes and\npersists the model state to disk and the job must be opened again before\nanalyzing further data.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-flush-job", "parameters": [ { @@ -15444,6 +15453,7 @@ "ml anomaly" ], "summary": "Get info about events in calendars", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendar-events", "parameters": [ { @@ -15542,6 +15552,7 @@ "ml anomaly" ], "summary": "Add scheduled events to the calendar", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-post-calendar-events", "parameters": [ { @@ -15610,6 +15621,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars", "parameters": [ { @@ -15634,6 +15646,7 @@ "ml anomaly" ], "summary": "Get calendar configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-calendars-1", "parameters": [ { @@ -15660,7 +15673,7 @@ "ml data frame" ], "summary": "Get data frame analytics job configuration info", - "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.", + "description": "You can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics-1", "parameters": [ { @@ -15690,6 +15703,7 @@ "ml data frame" ], "summary": "Get data frame analytics job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics-stats", "parameters": [ { @@ -15719,6 +15733,7 @@ "ml data frame" ], "summary": "Get data frame analytics job stats", + "description": "\n ##Required 
authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-data-frame-analytics-stats-1", "parameters": [ { @@ -15751,7 +15766,7 @@ "ml anomaly" ], "summary": "Get datafeed stats", - "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeed-stats", "parameters": [ { @@ -15775,7 +15790,7 @@ "ml anomaly" ], "summary": "Get datafeed stats", - "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeed-stats-1", "parameters": [ { @@ -15796,7 +15811,7 @@ "ml anomaly" ], "summary": "Get datafeeds configuration info", - "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. 
You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "description": "You can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression.
You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-datafeeds-1", "parameters": [ { @@ -15820,7 +15835,7 @@ "ml anomaly" ], "summary": "Get filters", - "description": "You can get a single filter or all filters.", + "description": "You can get a single filter or all filters.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-get-filters", "parameters": [ { @@ -15844,6 +15859,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-job-stats", "parameters": [ { @@ -15864,6 +15880,7 @@ "ml anomaly" ], "summary": "Get anomaly detection job stats", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-job-stats-1", "parameters": [ { @@ -15887,7 +15904,7 @@ "ml anomaly" ], "summary": "Get anomaly detection jobs configuration info", - "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "description": "You can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-jobs-1", "parameters": [ { @@ -15911,7 +15928,7 @@ "ml anomaly" ], "summary": "Get overall bucket results", - "description": "Retrievs overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.", + "description": "Retrievs overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. 
This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-overall-buckets", "parameters": [ { @@ -15954,7 +15971,7 @@ "ml anomaly" ], "summary": "Get overall bucket results", - "description": "Retrievs overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.", + "description": "Retrievs overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. 
If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.", + "description": "Retrievs overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket.
If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-overall-buckets-1", "parameters": [ { @@ -15999,6 +16016,7 @@ "ml trained model" ], "summary": "Get trained model configuration info", + "description": "\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models-1", "parameters": [ { @@ -16037,7 +16055,7 @@ "ml trained model" ], "summary": "Get trained models usage info", - "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", + "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models-stats", "parameters": [ { @@ -16067,7 +16085,7 @@ "ml trained model" ], "summary": "Get trained models usage info", - "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", + "description": "You can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-get-trained-models-stats-1", "parameters": [ { @@ -16178,7 +16196,7 @@ "ml anomaly" ], "summary": "Open anomaly detection jobs", - "description": "An anomaly detection job must be opened to be ready to receive and analyze\ndata. It can be opened and closed multiple times throughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. The job is ready to resume its analysis from where it left off, once\nnew data is received.", + "description": "An anomaly detection job must be opened to be ready to receive and analyze\ndata. It can be opened and closed multiple times throughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. 
The job is ready to resume its analysis from where it left off, once\nnew data is received.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-open-job", "parameters": [ { @@ -16262,7 +16280,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics", "requestBody": { "$ref": "#/components/requestBodies/ml.preview_data_frame_analytics" @@ -16279,7 +16297,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics-1", "requestBody": { "$ref": "#/components/requestBodies/ml.preview_data_frame_analytics" @@ -16298,7 +16316,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics-2", "parameters": [ { @@ -16320,7 +16338,7 @@ "ml data frame" ], "summary": "Preview features used by data frame analytics", - "description": "Preview the extracted features used by a data frame analytics config.", + "description": "Preview the extracted features used by a data frame analytics config.\n ##Required authorization\n* Cluster privileges: `monitor_ml`", "operationId": "ml-preview-data-frame-analytics-3", "parameters": [ { @@ -16344,7 +16362,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. 
To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed", "parameters": [ { @@ -16372,7 +16390,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed-1", "parameters": [ { @@ -16402,7 +16420,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. 
To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed-2", "parameters": [ { @@ -16427,7 +16445,7 @@ "ml anomaly" ], "summary": "Preview a datafeed", - "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "description": "This API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.\n ##Required authorization\n* Index privileges: `read`* Cluster privileges: `manage_ml`", "operationId": "ml-preview-datafeed-3", "parameters": [ { @@ -16454,6 +16472,7 @@ "ml trained model" ], "summary": "Create part of a trained model definition", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model-definition-part", "parameters": [ { @@ -16529,7 +16548,7 @@ "ml trained model" ], "summary": "Create a trained model vocabulary", - "description": "This API is supported only for natural language processing (NLP) models.\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.", + "description": "This API is supported only for natural language processing (NLP) models.\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-put-trained-model-vocabulary", "parameters": [ { @@ -16603,7 +16622,7 @@ "ml anomaly" ], "summary": "Reset an anomaly detection job", - "description": "All model state and results are deleted. The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.", + "description": "All model state and results are deleted. 
The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.", + "description": "All model state and results are deleted.
The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-reset-job", "parameters": [ { @@ -16659,7 +16678,7 @@ "ml data frame" ], "summary": "Start a data frame analytics job", - "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\nIf the destination index does not exist, it is created automatically the\nfirst time you start the data frame analytics job. The\n`index.number_of_shards` and `index.number_of_replicas` settings for the\ndestination index are copied from the source index. If there are multiple\nsource indices, the destination index copies the highest setting values. The\nmappings for the destination index are also copied from the source indices.\nIf there are any mapping conflicts, the job fails to start.\nIf the destination index exists, it is used as is. You can therefore set up\nthe destination index in advance with custom settings and mappings.", + "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\nIf the destination index does not exist, it is created automatically the\nfirst time you start the data frame analytics job. The\n`index.number_of_shards` and `index.number_of_replicas` settings for the\ndestination index are copied from the source index. If there are multiple\nsource indices, the destination index copies the highest setting values. The\nmappings for the destination index are also copied from the source indices.\nIf there are any mapping conflicts, the job fails to start.\nIf the destination index exists, it is used as is. You can therefore set up\nthe destination index in advance with custom settings and mappings.\n ##Required authorization\n* Index privileges: `create_index`,`index`,`manage`,`read`,`view_index_metadata`* Cluster privileges: `manage_ml`", "operationId": "ml-start-data-frame-analytics", "parameters": [ { @@ -16717,7 +16736,7 @@ "ml anomaly" ], "summary": "Start datafeeds", - "description": "A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n\nBefore you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.\n\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\nupdate it had at the time of creation or update and runs the query using those same roles. If you provided secondary\nauthorization headers when you created or updated the datafeed, those credentials are used instead.", + "description": "A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n\nBefore you can start a datafeed, the anomaly detection job must be open. 
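As a quick sketch of the start-datafeed flow described above (the datafeed name and `start` timestamp are placeholders, not values taken from this spec):

```
# Start a (hypothetical) datafeed from a fixed timestamp. The anomaly
# detection job it feeds must already be open, as noted above.
curl -H "Content-Type: application/json" \
  -XPOST "localhost:9200/_ml/datafeeds/datafeed-test/_start?pretty" \
  -d'{ "start": "2025-01-01T00:00:00Z" }'
```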
Otherwise, an error occurs.\n\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\nupdate it had at the time of creation or update and runs the query using those same roles. If you provided secondary\nauthorization headers when you created or updated the datafeed, those credentials are used instead.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-start-datafeed", "parameters": [ { @@ -16816,7 +16835,7 @@ "ml trained model" ], "summary": "Start a trained model deployment", - "description": "It allocates the model to every machine learning node.", + "description": "It allocates the model to every machine learning node.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-start-trained-model-deployment", "parameters": [ { @@ -16944,7 +16963,7 @@ "ml data frame" ], "summary": "Stop data frame analytics jobs", - "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.", + "description": "A data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-stop-data-frame-analytics", "parameters": [ { @@ -17018,7 +17037,7 @@ "ml anomaly" ], "summary": "Stop datafeeds", - "description": "A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.", + "description": "A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-stop-datafeed", "parameters": [ { @@ -17114,6 +17133,7 @@ "ml trained model" ], "summary": "Stop a trained model deployment", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-stop-trained-model-deployment", "parameters": [ { @@ -17177,6 +17197,7 @@ "ml data frame" ], "summary": "Update a data frame analytics job", + "description": "\n ##Required authorization\n* Index privileges: `read`,`create_index`,`manage`,`index`,`view_index_metadata`* Cluster privileges: `manage_ml`", "operationId": "ml-update-data-frame-analytics", "parameters": [ { @@ -17289,7 +17310,7 @@ "ml anomaly" ], "summary": "Update a datafeed", - "description": "You must stop and start the datafeed for the changes to be applied.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\nthe time of the update and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.", + "description": "You must stop and start the datafeed for the changes to be applied.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\nthe time of the update and runs the query using those same roles. 
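The update-datafeed behaviour described above can be sketched the same way; the datafeed name and query are placeholders, and the change only takes effect once the datafeed is stopped and started again:

```
# Update the query of a (hypothetical) datafeed; requires `manage_ml`.
curl -H "Content-Type: application/json" \
  -XPOST "localhost:9200/_ml/datafeeds/datafeed-test/_update?pretty" \
  -d'{ "query": { "term": { "geo.src": "US" } } }'
```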
If you provide secondary authorization headers,\nthose credentials are used instead.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-datafeed", "parameters": [ { @@ -17495,7 +17516,7 @@ "ml anomaly" ], "summary": "Update a filter", - "description": "Updates the description of a filter, adds items, or removes items from the list.", + "description": "Updates the description of a filter, adds items, or removes items from the list.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-filter", "parameters": [ { @@ -17580,7 +17601,7 @@ "ml anomaly" ], "summary": "Update an anomaly detection job", - "description": "Updates certain properties of an anomaly detection job.", + "description": "Updates certain properties of an anomaly detection job.\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-job", "parameters": [ { @@ -17778,6 +17799,7 @@ "ml trained model" ], "summary": "Update a trained model deployment", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_ml`", "operationId": "ml-update-trained-model-deployment", "parameters": [ { @@ -17849,7 +17871,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch", "parameters": [ { @@ -17907,7 +17929,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another 
node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch-1", "parameters": [ { @@ -17967,7 +17989,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch-2", "parameters": [ { @@ -18028,7 +18050,7 @@ "search" ], "summary": "Run multiple searches", - "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "description": "The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "msearch-3", "parameters": [ { @@ -18091,7 +18113,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ 
\"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -18134,7 +18156,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -18179,7 +18201,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": 
{ \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -18225,7 +18247,7 @@ "search" ], "summary": "Run multiple templated searches", - "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "description": "Run multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -18273,7 +18295,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors", "parameters": [ { @@ -18334,7 +18356,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents 
in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors-1", "parameters": [ { @@ -18397,7 +18419,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors-2", "parameters": [ { @@ -18461,7 +18483,7 @@ "document" ], "summary": "Get multiple term vectors", - "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "description": "Get multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors 
API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "mtermvectors-3", "parameters": [ { @@ -18527,7 +18549,7 @@ "search" ], "summary": "Open a point in time", - "description": "A search request by default runs against the most recent visible data of the target indices,\nwhich is called point in time. Elasticsearch pit (point in time) is a lightweight view into the\nstate of the data as it existed when initiated. In some cases, it’s preferred to perform multiple\nsearch requests using the same point in time. For example, if refreshes happen between\n`search_after` requests, then the results of those requests might not be consistent as changes happening\nbetween searches are only visible to the more recent point in time.\n\nA point in time must be opened explicitly before being used in search requests.\n\nA subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.\n\nJust like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.\nIf you want to retrieve more hits, use PIT with `search_after`.\n\nIMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.\n\nWhen a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception.\nTo get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.\n\n**Keeping point in time alive**\n\nThe `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time.\nThe value does not need to be long enough to process all data — it just needs to be long enough for the next request.\n\nNormally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.\nOnce the smaller segments are no longer needed they are deleted.\nHowever, open point-in-times prevent the old segments from being deleted since they are still in use.\n\nTIP: Keeping older segments alive means that more disk space and file handles are needed.\nEnsure that you have configured your nodes to have ample free file handles.\n\nAdditionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.\nEnsure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.\nNote that a point-in-time doesn't prevent its associated indices from being deleted.\nYou can check how many point-in-times (that is, search contexts) are open with the nodes stats API.", + "description": "A search request by default runs against the most recent visible data of the target indices,\nwhich is called point in time. Elasticsearch pit (point in time) is a lightweight view into the\nstate of the data as it existed when initiated. 
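To make the artificial-documents usage of the multi term vectors API above concrete, a minimal sketch (the index name and document are placeholders; the mapping of `my-index` determines how the text is analyzed):

```
# Term vectors for an artificial document that is not stored in the index.
curl -H "Content-Type: application/json" \
  -XPOST "localhost:9200/_mtermvectors?pretty" \
  -d'{
    "docs": [
      { "_index": "my-index", "doc": { "message": "hello world" } }
    ]
  }'
```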
In some cases, it’s preferred to perform multiple\nsearch requests using the same point in time. For example, if refreshes happen between\n`search_after` requests, then the results of those requests might not be consistent as changes happening\nbetween searches are only visible to the more recent point in time.\n\nA point in time must be opened explicitly before being used in search requests.\n\nA subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.\n\nJust like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.\nIf you want to retrieve more hits, use PIT with `search_after`.\n\nIMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.\n\nWhen a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception.\nTo get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.\n\n**Keeping point in time alive**\n\nThe `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time.\nThe value does not need to be long enough to process all data — it just needs to be long enough for the next request.\n\nNormally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.\nOnce the smaller segments are no longer needed they are deleted.\nHowever, open point-in-times prevent the old segments from being deleted since they are still in use.\n\nTIP: Keeping older segments alive means that more disk space and file handles are needed.\nEnsure that you have configured your nodes to have ample free file handles.\n\nAdditionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.\nEnsure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.\nNote that a point-in-time doesn't prevent its associated indices from being deleted.\nYou can check how many point-in-times (that is, search contexts) are open with the nodes stats API.\n ##Required authorization\n* Index privileges: `read`", "operationId": "open-point-in-time", "parameters": [ { @@ -18672,7 +18694,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -18715,7 +18737,7 @@ "script" ], "summary": "Create or update a script or search template", - "description": "Creates or updates a stored script or search template.", + "description": "Creates or updates a stored script or search template.\n ##Required authorization\n* Cluster privileges: `manage`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -18760,7 +18782,7 @@ 
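The point-in-time flow described above boils down to two calls: open the PIT, then pass its id to subsequent searches. A minimal sketch with placeholder names:

```
# 1. Open a point in time on a (hypothetical) index, kept alive for 1 minute.
curl -XPOST "localhost:9200/my-index/_pit?keep_alive=1m&pretty"

# 2. Search against the PIT; no index is given in the path because it is
#    taken from the point in time itself.
curl -H "Content-Type: application/json" \
  -XPOST "localhost:9200/_search?pretty" \
  -d'{
    "size": 100,
    "pit": { "id": "<id returned by step 1>", "keep_alive": "1m" }
  }'
```

Each search response may return an updated PIT id, which should be used for the next request, as the IMPORTANT note in the description above points out.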
"query_rules" ], "summary": "Get a query rule", - "description": "Get details about a query rule within a query ruleset.", + "description": "Get details about a query rule within a query ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/searching-with-query-rules" }, @@ -18820,7 +18842,7 @@ "query_rules" ], "summary": "Create or update a query rule", - "description": "Create or update a query rule within a query ruleset.\n\nIMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.", + "description": "Create or update a query rule within a query ruleset.\n\nIMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-put-rule", "parameters": [ { @@ -18925,7 +18947,7 @@ "query_rules" ], "summary": "Delete a query rule", - "description": "Delete a query rule within a query ruleset.\nThis is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.", + "description": "Delete a query rule within a query ruleset.\nThis is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-delete-rule", "parameters": [ { @@ -18972,7 +18994,7 @@ "query_rules" ], "summary": "Get a query ruleset", - "description": "Get details about a query ruleset.", + "description": "Get details about a query ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-get-ruleset", "parameters": [ { @@ -19018,7 +19040,7 @@ "query_rules" ], "summary": "Create or update a query ruleset", - "description": "There is a limit of 100 rules per ruleset.\nThis limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.\n\nIMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule.\nIt is advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.", + "description": "There is a limit of 100 rules per ruleset.\nThis limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting.\n\nIMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule.\nIt is 
advised to use one or the other in query rulesets, to avoid errors.\nAdditionally, pinned queries have a maximum limit of 100 pinned hits.\nIf multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/rest-apis/searching-with-query-rules" }, @@ -19103,7 +19125,7 @@ "query_rules" ], "summary": "Delete a query ruleset", - "description": "Remove a query ruleset and its associated data.\nThis is a destructive action that is not recoverable.", + "description": "Remove a query ruleset and its associated data.\nThis is a destructive action that is not recoverable.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-delete-ruleset", "parameters": [ { @@ -19139,7 +19161,7 @@ "query_rules" ], "summary": "Get all query rulesets", - "description": "Get summarized information about the query rulesets.", + "description": "Get summarized information about the query rulesets.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-list-rulesets", "parameters": [ { @@ -19211,7 +19233,7 @@ "query_rules" ], "summary": "Test a query ruleset", - "description": "Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.", + "description": "Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.\n ##Required authorization\n* Cluster privileges: `manage_search_query_rules`", "operationId": "query-rules-test", "parameters": [ { @@ -19302,7 +19324,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval", "parameters": [ { @@ -19333,7 +19355,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval-1", "parameters": [ { @@ -19366,7 +19388,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval-2", "parameters": [ { @@ -19400,7 +19422,7 @@ "search" ], "summary": "Evaluate ranked search results", - "description": "Evaluate the quality of ranked search results over a set of typical search queries.", + "description": "Evaluate the quality of ranked search results over a set of typical search queries.\n ##Required authorization\n* Index privileges: `read`", "operationId": "rank-eval-3", "parameters": [ { @@ -19436,7 +19458,7 @@ "document" ], "summary": "Reindex documents", - "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the 
documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple 
sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
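Pulling together the request shape and the manual slicing behaviour described above, a sketch with placeholder index names:

```
# Reindex a (hypothetical) index in two manual slices run in parallel;
# each request carries its slice id and the total slice count.
for id in 0 1; do
  curl -H "Content-Type: application/json" \
    -XPOST "localhost:9200/_reindex?pretty" \
    -d'{
      "source": { "index": "my-index", "slice": { "id": '$id', "max": 2 } },
      "dest":   { "index": "my-index-reindexed" }
    }' &
done
wait

# Or let Elasticsearch pick the slice count automatically:
# POST _reindex?slices=auto&wait_for_completion=false
```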
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security 
privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n 
\"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.\n ##Required authorization\n* Index privileges: `read`,`write`", "operationId": "reindex", "parameters": [ { @@ -19712,7 +19734,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index privileges: `read`", "operationId": "render-search-template", "requestBody": { "$ref": "#/components/requestBodies/render_search_template" @@ -19735,7 +19757,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index 
privileges: `read`", "operationId": "render-search-template-1", "requestBody": { "$ref": "#/components/requestBodies/render_search_template" @@ -19760,7 +19782,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index privileges: `read`", "operationId": "render-search-template-2", "parameters": [ { @@ -19788,7 +19810,7 @@ "search" ], "summary": "Render a search template", - "description": "Render a search template as a search request body.", + "description": "Render a search template as a search request body.\n ##Required authorization\n* Index privileges: `read`", "operationId": "render-search-template-3", "parameters": [ { @@ -19866,7 +19888,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -20023,7 +20045,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
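The slice-to-shard example above (two shards, four slices: slices 0 and 2 on the first shard, 1 and 3 on the second) is consistent with a simple modulo assignment for the first-level split. The sketch below is a hypothetical illustration of that assignment, not the actual server-side implementation.

```rust
/// Hypothetical sketch of the first-level slice split described above:
/// a slice is assigned to a shard by slice id modulo shard count.
fn shard_for_slice(slice_id: u32, num_shards: u32) -> u32 {
    slice_id % num_shards
}

fn main() {
    let num_shards = 2;
    for slice_id in 0..4 {
        // Prints: slice 0 -> shard 0, slice 1 -> shard 1, slice 2 -> shard 0, slice 3 -> shard 1
        println!("slice {slice_id} -> shard {}", shard_for_slice(slice_id, num_shards));
    }
}
```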
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -20182,7 +20204,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -20342,7 +20364,7 @@ "search" ], "summary": "Run a search", - "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", + "description": "Get search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/remote-clusters/remote-clusters-cert#remote-clusters-privileges-ccs" }, @@ -20504,6 +20526,7 @@ "search_application" ], "summary": "Get search application details", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_search_application`", "operationId": "search-application-get", "parameters": [ { @@ -20549,6 +20572,7 @@ "search_application" ], "summary": "Create or update a search application", + "description": "\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage_search_application`", "operationId": "search-application-put", "parameters": [ { @@ -20622,7 +20646,7 @@ "search_application" ], "summary": "Delete a search application", - "description": "Remove a search application and its associated alias. Indices attached to the search application are not removed.", + "description": "Remove a search application and its associated alias. Indices attached to the search application are not removed.\n ##Required authorization\n* Index privileges: `manage`* Cluster privileges: `manage_search_application`", "operationId": "search-application-delete", "parameters": [ { @@ -20776,7 +20800,7 @@ "search_application" ], "summary": "Get search applications", - "description": "Get information about search applications.", + "description": "Get information about search applications.\n ##Required authorization\n* Cluster privileges: `manage_search_application`", "operationId": "search-application-list", "parameters": [ { @@ -20922,7 +20946,7 @@ "search" ], "summary": "Search a vector tile", - "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. 
The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
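The geotile grid-precision rules spelled out in this description (final precision = zoom + `grid_precision`, capped at 29; the `aggs` layer grid has `2^grid_precision x 2^grid_precision` cells) reduce to a couple of lines of arithmetic. A hypothetical sketch, with made-up helper names:

```rust
/// Final geotile precision as described above: zoom + grid_precision, capped at 29.
fn geotile_final_precision(zoom: u32, grid_precision: u32) -> u32 {
    (zoom + grid_precision).min(29)
}

/// Number of cells in the `aggs` layer grid: 2^grid_precision per side.
fn cells_per_tile(grid_precision: u32) -> u64 {
    let side = 1u64 << grid_precision;
    side * side
}

fn main() {
    // From the description: zoom 7 with grid_precision 8 gives precision 15,
    // and grid_precision 8 divides the tile into 256 x 256 = 65,536 cells.
    assert_eq!(geotile_final_precision(7, 8), 15);
    assert_eq!(cells_per_tile(8), 65_536);
}
```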
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", + "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. 
The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://github.com/mapbox/vector-tile-spec/blob/master/README.md" }, @@ -20986,7 +21010,7 @@ "search" ], "summary": "Search a vector tile", - "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector 
tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", + "description": "Search a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. 
The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://github.com/mapbox/vector-tile-spec/blob/master/README.md" }, @@ -21052,6 +21076,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -21118,6 
+21143,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -21186,6 +21212,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -21255,6 +21282,7 @@ "search" ], "summary": "Run a search with a search template", + "description": "\n ##Required authorization\n* Index privileges: `read`", "externalDocs": { "url": "https://www.elastic.co/docs/solutions/search/search-templates" }, @@ -21424,7 +21452,7 @@ "security" ], "summary": "Get API key information", - "description": "Retrieves information for one or more API keys.\nNOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Retrieves information for one or more API keys.\nNOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`,`read_security`", "operationId": "security-get-api-key", "parameters": [ { @@ -21556,7 +21584,7 @@ "security" ], "summary": "Create an API key", - "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.", + "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. 
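The three invalidation formats listed above for callers holding only `manage_own_api_key` can be illustrated as request bodies. This is a hypothetical sketch assuming the `serde_json` crate is available; the user name, realm name, and key id values are placeholders.

```rust
use serde_json::json;

fn main() {
    // 1. Invalidate all API keys owned by the calling user.
    let by_owner = json!({ "owner": true });

    // 2. Match the user's identity explicitly (placeholder values).
    let by_identity = json!({ "username": "jdoe", "realm_name": "native1" });

    // 3. An API key invalidating itself by id (placeholder id).
    let by_id = json!({ "ids": ["example-api-key-id"] });

    for body in [by_owner, by_identity, by_id] {
        println!("{body}");
    }
}
```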
You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/security-settings#api-key-service-settings" }, @@ -21587,7 +21615,7 @@ "security" ], "summary": "Create an API key", - "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.", + "description": "Create an API key for access without requiring basic authentication.\n\nIMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges.\nIf you specify privileges, the API returns an error.\n\nA successful request returns a JSON structure that contains the API key, its unique id, and its name.\nIf applicable, it also returns expiration information for the API key in milliseconds.\n\nNOTE: By default, API keys never expire. 
You can specify expiration information when you create the API keys.\n\nThe API keys are created by the Elasticsearch API key service, which is automatically enabled.\nTo configure or turn off the API key service, refer to API key service setting documentation.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/configuration-reference/security-settings#api-key-service-settings" }, @@ -21618,7 +21646,7 @@ "security" ], "summary": "Invalidate API keys", - "description": "This API invalidates API keys created by the create API key or grant API key APIs.\nInvalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.\n\nTo use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges.\nThe `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys.\nThe `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys.\nThe `manage_own_api_key` only allows deleting REST API keys that are owned by the user.\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\n\n- Set the parameter `owner=true`.\n- Or, set both `username` and `realm_name` to match the user's identity.\n- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.", + "description": "This API invalidates API keys created by the create API key or grant API key APIs.\nInvalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.\n\nTo use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges.\nThe `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys.\nThe `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys.\nThe `manage_own_api_key` only allows deleting REST API keys that are owned by the user.\nIn addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:\n\n- Set the parameter `owner=true`.\n- Or, set both `username` and `realm_name` to match the user's identity.\n- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field.\n ##Required authorization\n* Cluster privileges: `manage_api_key`,`manage_own_api_key`", "operationId": "security-invalidate-api-key", "requestBody": { "content": { @@ -21753,7 +21781,7 @@ "security" ], "summary": "Get roles", - "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.", + "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: 
`read_security`", "operationId": "security-get-role", "parameters": [ { @@ -21778,7 +21806,7 @@ "security" ], "summary": "Create or update roles", - "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.", + "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles" }, @@ -21812,7 +21840,7 @@ "security" ], "summary": "Create or update roles", - "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.", + "description": "The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.\nThe create or update roles API cannot update roles that are defined in roles files.\nFile-based role management is not available in Elastic Serverless.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/deploy-manage/users-roles/cluster-or-deployment-auth/defining-roles" }, @@ -21846,7 +21874,7 @@ "security" ], "summary": "Delete roles", - "description": "Delete roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe delete roles API cannot remove roles that are defined in roles files.", + "description": "Delete roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe delete roles API cannot remove roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `manage_security`", "operationId": "security-delete-role", "parameters": [ { @@ -21913,7 +21941,7 @@ "security" ], "summary": "Get builtin privileges", - "description": "Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.", + "description": "Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.\n ##Required authorization\n* Cluster privileges: `manage_security`", "externalDocs": { "url": "https://www.elastic.co/docs/reference/elasticsearch/security-privileges" }, @@ -21971,7 +21999,7 @@ "security" ], "summary": "Get roles", - "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API cannot retrieve roles that are defined in roles files.", + "description": "Get roles in the native realm.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe get roles API 
cannot retrieve roles that are defined in roles files.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-get-role-1", "responses": { "200": { @@ -22111,7 +22139,7 @@ "security" ], "summary": "Find API keys with a query", - "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`,`read_security`", "operationId": "security-query-api-keys", "parameters": [ { @@ -22145,7 +22173,7 @@ "security" ], "summary": "Find API keys with a query", - "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.", + "description": "Get a paginated list of API keys and their information.\nYou can optionally filter the results with a query.\n\nTo use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges.\nIf you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own.\nIf you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`,`read_security`", "operationId": "security-query-api-keys-1", "parameters": [ { @@ -22181,7 +22209,7 @@ "security" ], "summary": "Find roles with a query", - "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.", + "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": 
"security-query-role", "requestBody": { "$ref": "#/components/requestBodies/security.query_role" @@ -22204,7 +22232,7 @@ "security" ], "summary": "Find roles with a query", - "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.", + "description": "Get roles in a paginated manner.\nThe role management APIs are generally the preferred way to manage roles, rather than using file-based role management.\nThe query roles API does not retrieve roles that are defined in roles files, nor built-in ones.\nYou can optionally filter the results with a query.\nAlso, the results can be paginated and sorted.\n ##Required authorization\n* Cluster privileges: `read_security`", "operationId": "security-query-role-1", "requestBody": { "$ref": "#/components/requestBodies/security.query_role" @@ -22229,7 +22257,7 @@ "security" ], "summary": "Update an API key", - "description": "Update attributes of an existing API key.\nThis API supports updates to an API key's access scope, expiration, and metadata.\n\nTo use this API, you must have at least the `manage_own_api_key` cluster privilege.\nUsers can only update API keys that they created or that were granted to them.\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.\n\nUse this API to update API keys created by the create API key or grant API Key APIs.\nIf you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.\nIt's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.\n\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.\nThis change can occur if the owner user's permissions have changed since the API key was created or last modified.", + "description": "Update attributes of an existing API key.\nThis API supports updates to an API key's access scope, expiration, and metadata.\n\nTo use this API, you must have at least the `manage_own_api_key` cluster privilege.\nUsers can only update API keys that they created or that were granted to them.\nTo update another user’s API key, use the `run_as` feature to submit a request on behalf of another user.\n\nIMPORTANT: It's not possible to use an API key as the authentication credential for this API. 
The owner user’s credentials are required.\n\nUse this API to update API keys created by the create API key or grant API Key APIs.\nIf you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.\nIt's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.\n\nThe access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request.\nThe snapshot of the owner's permissions is updated automatically on every call.\n\nIMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope.\nThis change can occur if the owner user's permissions have changed since the API key was created or last modified.\n ##Required authorization\n* Cluster privileges: `manage_own_api_key`", "operationId": "security-update-api-key", "parameters": [ { @@ -22384,7 +22412,7 @@ "sql" ], "summary": "Delete an async SQL search", - "description": "Delete an async SQL search or a stored synchronous SQL search.\nIf the search is still running, the API cancels it.\n\nIf the Elasticsearch security features are enabled, only the following users can use this API to delete a search:\n\n* Users with the `cancel_task` cluster privilege.\n* The user who first submitted the search.", + "description": "Delete an async SQL search or a stored synchronous SQL search.\nIf the search is still running, the API cancels it.\n\nIf the Elasticsearch security features are enabled, only the following users can use this API to delete a search:\n\n* Users with the `cancel_task` cluster privilege.\n* The user who first submitted the search.\n ##Required authorization\n* Cluster privileges: `cancel_task`", "operationId": "sql-delete-async", "parameters": [ { @@ -22533,7 +22561,7 @@ "sql" ], "summary": "Get the async SQL search status", - "description": "Get the current status of an async SQL search or a stored synchronous SQL search.", + "description": "Get the current status of an async SQL search or a stored synchronous SQL search.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "sql-get-async-status", "parameters": [ { @@ -22599,7 +22627,7 @@ "sql" ], "summary": "Get SQL search results", - "description": "Run an SQL request.", + "description": "Run an SQL request.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-query-1", "parameters": [ { @@ -22627,7 +22655,7 @@ "sql" ], "summary": "Get SQL search results", - "description": "Run an SQL request.", + "description": "Run an SQL request.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-query", "parameters": [ { @@ -22657,7 +22685,7 @@ "sql" ], "summary": "Translate SQL into Elasticsearch queries", - "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.", + "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-translate-1", "requestBody": { "$ref": "#/components/requestBodies/sql.translate" @@ -22680,7 +22708,7 @@ "sql" ], "summary": "Translate SQL into Elasticsearch queries", - "description": "Translate an SQL search into a search API 
request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.", + "description": "Translate an SQL search into a search API request containing Query DSL.\nIt accepts the same request body parameters as the SQL search API, excluding `cursor`.\n ##Required authorization\n* Index privileges: `read`", "operationId": "sql-translate", "requestBody": { "$ref": "#/components/requestBodies/sql.translate" @@ -22705,6 +22733,7 @@ "synonyms" ], "summary": "Get a synonym set", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-get-synonym", "parameters": [ { @@ -22787,7 +22816,7 @@ "synonyms" ], "summary": "Create or update a synonym set", - "description": "Synonyms sets are limited to a maximum of 10,000 synonym rules per set.\nIf you need to manage more synonym rules, you can create multiple synonym sets.\n\nWhen an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.\nThis is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.", + "description": "Synonyms sets are limited to a maximum of 10,000 synonym rules per set.\nIf you need to manage more synonym rules, you can create multiple synonym sets.\n\nWhen an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.\nThis is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-put-synonym", "parameters": [ { @@ -22861,7 +22890,7 @@ "synonyms" ], "summary": "Delete a synonym set", - "description": "You can only delete a synonyms set that is not in use by any index analyzer.\n\nSynonyms sets can be used in synonym graph token filters and synonym token filters.\nThese synonym filters can be used as part of search analyzers.\n\nAnalyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).\nEven if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.\n\nIf any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.\nTo prevent that, synonyms sets that are used in analyzers can't be deleted.\nA delete request in this case will return a 400 response code.\n\nTo remove a synonyms set, you must first remove all indices that contain analyzers using it.\nYou can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.\nOnce finished, you can delete the index.\nWhen the synonyms set is not used in analyzers, you will be able to delete it.", + "description": "You can only delete a synonyms set that is not in use by any index analyzer.\n\nSynonyms sets can be used in synonym graph token filters and synonym token filters.\nThese synonym filters can be used as part of search analyzers.\n\nAnalyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).\nEven if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.\n\nIf any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index 
shards are not available.\nTo prevent that, synonyms sets that are used in analyzers can't be deleted.\nA delete request in this case will return a 400 response code.\n\nTo remove a synonyms set, you must first remove all indices that contain analyzers using it.\nYou can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.\nOnce finished, you can delete the index.\nWhen the synonyms set is not used in analyzers, you will be able to delete it.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-delete-synonym", "parameters": [ { @@ -22897,7 +22926,7 @@ "synonyms" ], "summary": "Get a synonym rule", - "description": "Get a synonym rule from a synonym set.", + "description": "Get a synonym rule from a synonym set.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-get-synonym-rule", "parameters": [ { @@ -22954,7 +22983,7 @@ "synonyms" ], "summary": "Create or update a synonym rule", - "description": "Create or update a synonym rule in a synonym set.\n\nIf any of the synonym rules included is invalid, the API returns an error.\n\nWhen you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.", + "description": "Create or update a synonym rule in a synonym set.\n\nIf any of the synonym rules included is invalid, the API returns an error.\n\nWhen you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-put-synonym-rule", "parameters": [ { @@ -23036,7 +23065,7 @@ "synonyms" ], "summary": "Delete a synonym rule", - "description": "Delete a synonym rule from a synonym set.", + "description": "Delete a synonym rule from a synonym set.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-delete-synonym-rule", "parameters": [ { @@ -23095,7 +23124,7 @@ "synonyms" ], "summary": "Get all synonym sets", - "description": "Get a summary of all defined synonym sets.", + "description": "Get a summary of all defined synonym sets.\n ##Required authorization\n* Cluster privileges: `manage_search_synonyms`", "operationId": "synonyms-get-synonyms-sets", "parameters": [ { @@ -23169,7 +23198,7 @@ "tasks" ], "summary": "Get task information", - "description": "Get information about a task currently running in the cluster.\n\nWARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nIf the task identifier is not found, a 404 response code indicates that there are no resources that match the request.", + "description": "Get information about a task currently running in the cluster.\n\nWARNING: The task management API is new and should still be considered a beta feature.\nThe API may change in ways that are not backwards compatible.\n\nIf the task identifier is not found, a 404 response code indicates that there are no resources that match the request.\n ##Required authorization\n* Cluster privileges: `monitor`", "operationId": "tasks-get", "parameters": [ { @@ -23319,7 +23348,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can 
retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors", "parameters": [ { @@ -23383,7 +23412,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors-1", "parameters": [ { @@ -23449,7 +23478,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors-2", "parameters": [ { @@ -23510,7 +23539,7 @@ "document" ], "summary": "Get term vector information", - "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "description": "Get information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.\n ##Required authorization\n* Index privileges: `read`", "operationId": "termvectors-3", "parameters": [ { @@ -23573,7 +23602,7 @@ "transform" ], "summary": "Get transforms", - "description": "Get configuration information for transforms.", + "description": "Get configuration information for transforms.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "transform-get-transform", "parameters": [ { @@ -23604,7 +23633,7 @@ "transform" ], "summary": "Create a transform", - "description": "Creates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. 
You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.", + "description": "Creates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. 
If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.\n ##Required authorization\n* Index privileges: `create_index`,`read`,`index`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-put-transform", "parameters": [ { @@ -23729,6 +23758,7 @@ "transform" ], "summary": "Delete a transform", + "description": "\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-delete-transform", "parameters": [ { @@ -23800,7 +23830,7 @@ "transform" ], "summary": "Get transforms", - "description": "Get configuration information for transforms.", + "description": "Get configuration information for transforms.\n ##Required authorization\n* Cluster privileges: `monitor_transform`", "operationId": "transform-get-transform-1", "parameters": [ { @@ -23830,7 +23860,7 @@ "transform" ], "summary": "Get transform stats", - "description": "Get usage information for transforms.", + "description": "Get usage information for transforms.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `monitor_transform`", "operationId": "transform-get-transform-stats", "parameters": [ { @@ -23927,7 +23957,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform", "parameters": [ { @@ -23958,7 +23988,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. 
These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform-1", "parameters": [ { @@ -23991,7 +24021,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform-2", "parameters": [ { @@ -24019,7 +24049,7 @@ "transform" ], "summary": "Preview a transform", - "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", + "description": "Generates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-preview-transform-3", "parameters": [ { @@ -24049,7 +24079,7 @@ "transform" ], "summary": "Reset a transform", - "description": "Before you can reset it, you must stop it; alternatively, use the `force` query parameter.\nIf the destination index was created by the transform, it is deleted.", + "description": "Before you can reset it, you must stop it; alternatively, use the `force` query parameter.\nIf the destination index was created by the transform, it is deleted.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-reset-transform", "parameters": [ { @@ -24111,7 +24141,7 @@ "transform" ], "summary": "Schedule a transform to start now", - "description": "Instantly run a transform to process data.\nIf you run this API, the transform will process the new data instantly,\nwithout waiting for the configured frequency interval. 
After the API is called,\nthe transform will be processed again at `now + frequency` unless the API\nis called again in the meantime.", + "description": "Instantly run a transform to process data.\nIf you run this API, the transform will process the new data instantly,\nwithout waiting for the configured frequency interval. After the API is called,\nthe transform will be processed again at `now + frequency` unless the API\nis called again in the meantime.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-schedule-now-transform", "parameters": [ { @@ -24163,7 +24193,7 @@ "transform" ], "summary": "Start a transform", - "description": "When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\nset to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping\ndefinitions for the destination index from the source indices and the transform aggregations. If fields in the\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\nin a pivot transform.\n\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\ntime of creation and uses those same roles. If those roles do not have the required privileges on the source and\ndestination indices, the transform fails when it attempts unauthorized operations.", + "description": "When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\nset to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping\ndefinitions for the destination index from the source indices and the transform aggregations. If fields in the\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\nin a pivot transform.\n\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\ntime of creation and uses those same roles. 
If those roles do not have the required privileges on the source and\ndestination indices, the transform fails when it attempts unauthorized operations.\n ##Required authorization\n* Index privileges: `read`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-start-transform", "parameters": [ { @@ -24225,7 +24255,7 @@ "transform" ], "summary": "Stop transforms", - "description": "Stops one or more transforms.", + "description": "Stops one or more transforms.\n ##Required authorization\n* Cluster privileges: `manage_transform`", "operationId": "transform-stop-transform", "parameters": [ { @@ -24317,7 +24347,7 @@ "transform" ], "summary": "Update a transform", - "description": "Updates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.", + "description": "Updates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.\n ##Required authorization\n* Index privileges: `read`,`index`,`view_index_metadata`* Cluster privileges: `manage_transform`", "operationId": "transform-update-transform", "parameters": [ { @@ -24490,7 +24520,7 @@ "document" ], "summary": "Update a document", - "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).", + "description": "Update a document by running a script or passing a partial document.\n\nIf the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.\n\nThe script can update, delete, or skip modifying the document.\nThe API also supports passing a partial document, which is merged into the existing document.\nTo fully replace an existing document, 
use the index API.\nThis operation:\n\n* Gets the document (collocated with the shard) from the index.\n* Runs the specified script.\n* Indexes the result.\n\nThe document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.\n\nThe `_source` field must be enabled to use this API.\nIn addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).\n ##Required authorization\n* Index privileges: `write`", "operationId": "update", "parameters": [ { @@ -24775,7 +24805,7 @@ "document" ], "summary": "Update documents", - "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause 
Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", + "description": "Updates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the 
difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.\n ##Required authorization\n* Index privileges: `read`,`write`", "operationId": "update-by-query", "parameters": [ {
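
For context on the `_update_by_query` behaviour documented in the description above, a request that combines throttling, automatic slicing, version-conflict counting, and a `ctx.op` script might look like the sketch below. This is only an illustration, not part of the generated output: the index name `my-index-000001` and the fields `user.id` and `views` are placeholders, while `requests_per_second`, `slices`, `conflicts`, and the `ctx` variables are the parameters described in the text.

```
# Throttled, auto-sliced update by query that bumps a counter or no-ops (placeholder index/field names)
POST /my-index-000001/_update_by_query?requests_per_second=500&slices=auto&conflicts=proceed
{
  "query": { "term": { "user.id": "kimchy" } },
  "script": {
    "lang": "painless",
    "source": "if (ctx._source.views == null) { ctx.op = 'noop' } else { ctx._source.views += 1 }"
  }
}
```

With `requests_per_second=500` and the default batch size of 1000, each batch targets roughly two seconds, matching the worked `target_time`/`wait_time` example in the description.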