Commit 38a7d36

separate config options into elasticsearch specific and shared options
Elasticsearch-specific options now live in the main Elasticsearch class file, and shared options have been moved into the PluginMixins::ElasticSearch::APIConfigs namespace. This is a code refactoring with no end-user impact.
1 parent dcd92d0 commit 38a7d36
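
For context, the refactor follows the standard Logstash mixin pattern: the output class pulls the shared options in with a plain `include` (see the elasticsearch.rb diff below). The sketch that follows is not part of the commit diff, and the real lib/logstash/plugin_mixins/elasticsearch/api_configs.rb (presumably among the five changed files) is not shown in this excerpt; it is a hypothetical, minimal illustration of how such a shared-config mixin can be structured. The DEFAULT_HOST value and the option list are assumptions.

# Hypothetical sketch of a shared-config mixin, not the actual api_configs.rb.
require "logstash/util/safe_uri"

module LogStash; module PluginMixins; module ElasticSearch
  module APIConfigs

    # Referenced by Common#hosts_default? (see the common.rb diff below);
    # the value here is assumed for illustration.
    DEFAULT_HOST = ::LogStash::Util::SafeURI.new("//127.0.0.1")

    def self.included(base)
      # `config` is the regular Logstash plugin option DSL; declaring the
      # shared options against the including class makes them behave exactly
      # as if they were written inside that class.
      base.config :user, :validate => :string
      base.config :password, :validate => :password
      base.config :timeout, :validate => :number, :default => 60
      # ... remaining shared/API options (ssl, proxy, sniffing, pooling, etc.)
    end
  end
end; end; end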

5 files changed: 294 additions & 297 deletions

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -1,3 +1,6 @@
+## unreleased
+ - Refactored configuration options into specific and shared in PluginMixins namespace [#973](https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/973)
+
 ## 10.7.3
  - Added composable index template support for elasticsearch version 8 [#980](https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/980)
 

lib/logstash/outputs/elasticsearch.rb

Lines changed: 127 additions & 129 deletions
@@ -88,15 +88,11 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
 
   require "logstash/outputs/elasticsearch/http_client"
   require "logstash/outputs/elasticsearch/http_client_builder"
-  require "logstash/outputs/elasticsearch/common_configs"
+  require "logstash/plugin_mixins/elasticsearch/api_configs"
   require "logstash/outputs/elasticsearch/common"
   require "logstash/outputs/elasticsearch/ilm"
-
   require 'logstash/plugin_mixins/ecs_compatibility_support'
 
-  # Protocol agnostic (i.e. non-http, non-java specific) configs go here
-  include(LogStash::Outputs::ElasticSearch::CommonConfigs)
-
   # Protocol agnostic methods
   include(LogStash::Outputs::ElasticSearch::Common)
 
@@ -106,6 +102,11 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # ecs_compatibility option, provided by Logstash core or the support adapter.
   include(LogStash::PluginMixins::ECSCompatibilitySupport)
 
+  # Generic/API config options that any document indexer output needs
+  include(LogStash::PluginMixins::ElasticSearch::APIConfigs)
+
+  DEFAULT_POLICY = "logstash-policy"
+
   config_name "elasticsearch"
 
   # The Elasticsearch action to perform. Valid actions are:
@@ -122,130 +123,128 @@ class LogStash::Outputs::ElasticSearch < LogStash::Outputs::Base
   # For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation]
   config :action, :validate => :string, :default => "index"
 
-  # Username to authenticate to a secure Elasticsearch cluster
-  config :user, :validate => :string
-  # Password to authenticate to a secure Elasticsearch cluster
-  config :password, :validate => :password
+  # The index to write events to. This can be dynamic using the `%{foo}` syntax.
+  # The default value will partition your indices by day so you can more easily
+  # delete old data or only search specific date ranges.
+  # Indexes may not contain uppercase characters.
+  # For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
+  # LS uses Joda to format the index pattern from event timestamp.
+  # Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+  config :index, :validate => :string
+
+  config :document_type,
+    :validate => :string,
+    :deprecated => "Document types are being deprecated in Elasticsearch 6.0, and removed entirely in 7.0. You should avoid this feature"
+
+  # From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+  # Logstash's startup if one with the name `template_name` does not already exist.
+  # By default, the contents of this template is the default template for
+  # `logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+  # `logstash-*`. Should you require support for other index names, or would like
+  # to change the mappings in the template in general, a custom template can be
+  # specified by setting `template` to the path of a template file.
+  #
+  # Setting `manage_template` to false disables this feature. If you require more
+  # control over template creation, (e.g. creating indices dynamically based on
+  # field names) you should set `manage_template` to false and use the REST
+  # API to apply your templates manually.
+  config :manage_template, :validate => :boolean, :default => true
+
+  # This configuration option defines how the template is named inside Elasticsearch.
+  # Note that if you have used the template management features and subsequently
+  # change this, you will need to prune the old template manually, e.g.
+  #
+  # `curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
+  #
+  # where `OldTemplateName` is whatever the former setting was.
+  config :template_name, :validate => :string
+
+  # You can set the path to your own template here, if you so desire.
+  # If not set, the included template will be used.
+  config :template, :validate => :path
+
+  # The template_overwrite option will always overwrite the indicated template
+  # in Elasticsearch with either the one indicated by template or the included one.
+  # This option is set to false by default. If you always want to stay up to date
+  # with the template provided by Logstash, this option could be very useful to you.
+  # Likewise, if you have your own template file managed by puppet, for example, and
+  # you wanted to be able to update it regularly, this option could help there as well.
+  #
+  # Please note that if you are using your own customized version of the Logstash
+  # template (logstash), setting this to true will make Logstash to overwrite
+  # the "logstash" template (i.e. removing all customized settings)
+  config :template_overwrite, :validate => :boolean, :default => false
 
-  # Authenticate using Elasticsearch API key.
-  # format is id:api_key (as returned by https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html[Create API key])
-  config :api_key, :validate => :password
+  # The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+  # See https://www.elastic.co/blog/elasticsearch-versioning-support.
+  config :version, :validate => :string
 
-  # Cloud authentication string ("<username>:<password>" format) is an alternative for the `user`/`password` configuration.
-  #
-  # For more details, check out the https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html#_cloud_auth[cloud documentation]
-  config :cloud_auth, :validate => :password
-
-  # HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
-  # the root path for the Elasticsearch HTTP API lives.
-  # Note that if you use paths as components of URLs in the 'hosts' field you may
-  # not also set this field. That will raise an error at startup
-  config :path, :validate => :string
-
-  # HTTP Path to perform the _bulk requests to
-  # this defaults to a concatenation of the path parameter and "_bulk"
-  config :bulk_path, :validate => :string
-
-  # Pass a set of key value pairs as the URL query string. This query string is added
-  # to every host listed in the 'hosts' configuration. If the 'hosts' list contains
-  # urls that already have query strings, the one specified here will be appended.
-  config :parameters, :validate => :hash
-
-  # Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme
-  # is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
-  # If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'
-  config :ssl, :validate => :boolean
-
-  # Option to validate the server's certificate. Disabling this severely compromises security.
-  # For more information on disabling certificate verification please read
-  # https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
-  config :ssl_certificate_verification, :validate => :boolean, :default => true
-
-  # The .cer or .pem file to validate the server's certificate
-  config :cacert, :validate => :path
-
-  # The JKS truststore to validate the server's certificate.
-  # Use either `:truststore` or `:cacert`
-  config :truststore, :validate => :path
-
-  # Set the truststore password
-  config :truststore_password, :validate => :password
-
-  # The keystore used to present a certificate to the server.
-  # It can be either .jks or .p12
-  config :keystore, :validate => :path
-
-  # Set the keystore password
-  config :keystore_password, :validate => :password
-
-  # This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
-  # Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
-  # this with master nodes, you probably want to disable HTTP on them by setting
-  # `http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
-  # manually enter multiple Elasticsearch hosts using the `hosts` parameter.
-  config :sniffing, :validate => :boolean, :default => false
-
-  # How long to wait, in seconds, between sniffing attempts
-  config :sniffing_delay, :validate => :number, :default => 5
-
-  # HTTP Path to be used for the sniffing requests
-  # the default value is computed by concatenating the path value and "_nodes/http"
-  # if sniffing_path is set it will be used as an absolute path
-  # do not use full URL here, only paths, e.g. "/sniff/_nodes/http"
-  config :sniffing_path, :validate => :string
-
-  # Set the address of a forward HTTP proxy.
-  # This used to accept hashes as arguments but now only accepts
-  # arguments of the URI type to prevent leaking credentials.
-  config :proxy, :validate => :uri # but empty string is allowed
-
-  # Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If
-  # a timeout occurs, the request will be retried.
-  config :timeout, :validate => :number, :default => 60
-
-  # Set the Elasticsearch errors in the whitelist that you don't want to log.
-  # A useful example is when you want to skip all 409 errors
-  # which are `document_already_exists_exception`.
-  config :failure_type_logging_whitelist, :validate => :array, :default => []
-
-  # While the output tries to reuse connections efficiently we have a maximum.
-  # This sets the maximum number of open connections the output will create.
-  # Setting this too low may mean frequently closing / opening connections
-  # which is bad.
-  config :pool_max, :validate => :number, :default => 1000
-
-  # While the output tries to reuse connections efficiently we have a maximum per endpoint.
-  # This sets the maximum number of open connections per endpoint the output will create.
-  # Setting this too low may mean frequently closing / opening connections
-  # which is bad.
-  config :pool_max_per_route, :validate => :number, :default => 100
-
-  # HTTP Path where a HEAD request is sent when a backend is marked down
-  # the request is sent in the background to see if it has come back again
-  # before it is once again eligible to service requests.
-  # If you have custom firewall rules you may need to change this
-  config :healthcheck_path, :validate => :string
-
-  # How frequently, in seconds, to wait between resurrection attempts.
-  # Resurrection is the process by which backend endpoints marked 'down' are checked
-  # to see if they have come back to life
-  config :resurrect_delay, :validate => :number, :default => 5
-
-  # How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
-  # You may want to set this lower, if you get connection errors regularly
-  # Quoting the Apache commons docs (this client is based Apache Commmons):
-  # 'Defines period of inactivity in milliseconds after which persistent connections must
-  # be re-validated prior to being leased to the consumer. Non-positive value passed to
-  # this method disables connection validation. This check helps detect connections that
-  # have become stale (half-closed) while kept inactive in the pool.'
-  # See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-  config :validate_after_inactivity, :validate => :number, :default => 10000
-
-  # Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond
-  config :http_compression, :validate => :boolean, :default => false
-
-  # Custom Headers to send on each request to elasticsearch nodes
-  config :custom_headers, :validate => :hash, :default => {}
+  # The version_type to use for indexing.
+  # See https://www.elastic.co/blog/elasticsearch-versioning-support.
+  # See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+  config :version_type, :validate => ["internal", 'external', "external_gt", "external_gte", "force"]
+
+  # A routing override to be applied to all processed events.
+  # This can be dynamic using the `%{foo}` syntax.
+  config :routing, :validate => :string
+
+  # For child documents, ID of the associated parent.
+  # This can be dynamic using the `%{foo}` syntax.
+  config :parent, :validate => :string, :default => nil
+
+  # For child documents, name of the join field
+  config :join_field, :validate => :string, :default => nil
+
+  # Set upsert content for update mode.s
+  # Create a new document with this parameter as json string if `document_id` doesn't exists
+  config :upsert, :validate => :string, :default => ""
+
+  # Enable `doc_as_upsert` for update mode.
+  # Create a new document with source if `document_id` doesn't exist in Elasticsearch
+  config :doc_as_upsert, :validate => :boolean, :default => false
+
+  # Set script name for scripted update mode
+  config :script, :validate => :string, :default => ""
+
+  # Define the type of script referenced by "script" variable
+  # inline : "script" contains inline script
+  # indexed : "script" contains the name of script directly indexed in elasticsearch
+  # file : "script" contains the name of script stored in elasticseach's config directory
+  config :script_type, :validate => ["inline", 'indexed', "file"], :default => ["inline"]
+
+  # Set the language of the used script. If not set, this defaults to painless in ES 5.0
+  config :script_lang, :validate => :string, :default => "painless"
+
+  # Set variable name passed to script (scripted update)
+  config :script_var_name, :validate => :string, :default => "event"
+
+  # if enabled, script is in charge of creating non-existent document (scripted update)
+  config :scripted_upsert, :validate => :boolean, :default => false
+
+  # The number of times Elasticsearch should internally retry an update/upserted document
+  # See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+  # for more info
+  config :retry_on_conflict, :validate => :number, :default => 1
+
+  # Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+  # here like `pipeline => "%{INGEST_PIPELINE}"`
+  config :pipeline, :validate => :string, :default => nil
+
+  # -----
+  # ILM configurations (beta)
+  # -----
+  # Flag for enabling Index Lifecycle Management integration.
+  config :ilm_enabled, :validate => [true, false, 'true', 'false', 'auto'], :default => 'auto'
+
+  # Rollover alias used for indexing data. If rollover alias doesn't exist, Logstash will create it and map it to the relevant index
+  config :ilm_rollover_alias, :validate => :string
+
+  # appends “{now/d}-000001” by default for new index creation, subsequent rollover indices will increment based on this pattern i.e. “000002”
+  # {now/d} is date math, and will insert the appropriate value automatically.
+  config :ilm_pattern, :validate => :string, :default => '{now/d}-000001'
+
+  # ILM policy to use, if undefined the default policy will be used.
+  config :ilm_policy, :validate => :string, :default => DEFAULT_POLICY
 
   def initialize(*params)
     super
@@ -323,5 +322,4 @@ def self.oss?
     name = plugin.name.split('-')[-1]
     require "logstash/outputs/elasticsearch/#{name}"
   end
-
-end # class LogStash::Outputs::Elasticsearch
+end

lib/logstash/outputs/elasticsearch/common.rb

Lines changed: 1 addition & 1 deletion
@@ -153,7 +153,7 @@ def setup_hosts
 
   def hosts_default?(hosts)
     # NOTE: would be nice if pipeline allowed us a clean way to detect a config default :
-    hosts.is_a?(Array) && hosts.size == 1 && hosts.first.equal?(CommonConfigs::DEFAULT_HOST)
+    hosts.is_a?(Array) && hosts.size == 1 && hosts.first.equal?(LogStash::PluginMixins::ElasticSearch::APIConfigs::DEFAULT_HOST)
   end
   private :hosts_default?
 
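A side note on the hosts_default? change above: `equal?` is Ruby's object-identity check, not value equality. The plugin's built-in default for `hosts` is constructed from the DEFAULT_HOST constant itself, so identity only holds when the user never set `hosts`; an explicitly configured host with the same address is a different object and is treated as user input. A small hypothetical snippet illustrating this (the "//127.0.0.1" value is assumed for illustration):

# Illustrative only; assumes a Logstash plugin environment where
# logstash/util/safe_uri is available.
require "logstash/util/safe_uri"

DEFAULT_HOST = LogStash::Util::SafeURI.new("//127.0.0.1")      # assumed default value

default_hosts  = [DEFAULT_HOST]                                # hosts left unset: the default reuses the constant
explicit_hosts = [LogStash::Util::SafeURI.new("//127.0.0.1")]  # user configured the same address explicitly

default_hosts.first.equal?(DEFAULT_HOST)   #=> true  (same object, so this is the config default)
explicit_hosts.first.equal?(DEFAULT_HOST)  #=> false (same value, different object)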