diff --git a/README.md b/README.md
index abb57bdf..d5304f5c 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 [![CircleCI](https://circleci.com/gh/prometheus-community/elasticsearch_exporter.svg?style=svg)](https://circleci.com/gh/prometheus-community/elasticsearch_exporter)
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus-community/elasticsearch_exporter)](https://goreportcard.com/report/github.com/prometheus-community/elasticsearch_exporter)
 
-Prometheus exporter for various metrics about ElasticSearch, written in Go.
+Prometheus exporter for various metrics about Elasticsearch, written in Go.
 
 ### Installation
 
@@ -39,7 +39,7 @@ helm install [RELEASE_NAME] prometheus-community/prometheus-elasticsearch-export
 
 ### Configuration
 
-**NOTE:** The exporter fetches information from an ElasticSearch cluster on every scrape, therefore having a too short scrape interval can impose load on ES master nodes, particularly if you run with `--es.all` and `--es.indices`. We suggest you measure how long fetching `/_nodes/stats` and `/_all/_stats` takes for your ES cluster to determine whether your scraping interval is too short. As a last resort, you can scrape this exporter using a dedicated job with its own scraping interval.
+**NOTE:** The exporter fetches information from an Elasticsearch cluster on every scrape, therefore having a too short scrape interval can impose load on ES master nodes, particularly if you run with `--es.all` and `--es.indices`. We suggest you measure how long fetching `/_nodes/stats` and `/_all/_stats` takes for your ES cluster to determine whether your scraping interval is too short. As a last resort, you can scrape this exporter using a dedicated job with its own scraping interval.
 
 Below is the command line options summary:
 ```bash
@@ -147,8 +147,8 @@ Further Information
 | elasticsearch_indices_indexing_index_total | counter | 1 | Total index calls
 | elasticsearch_indices_mappings_stats_fields | gauge | 1 | Count of fields currently mapped by index
 | elasticsearch_indices_mappings_stats_json_parse_failures_total | counter | 0 | Number of errors while parsing JSON
-| elasticsearch_indices_mappings_stats_scrapes_total | counter | 0 | Current total ElasticSearch Indices Mappings scrapes
-| elasticsearch_indices_mappings_stats_up | gauge | 0 | Was the last scrape of the ElasticSearch Indices Mappings endpoint successful
+| elasticsearch_indices_mappings_stats_scrapes_total | counter | 0 | Current total Elasticsearch Indices Mappings scrapes
+| elasticsearch_indices_mappings_stats_up | gauge | 0 | Was the last scrape of the Elasticsearch Indices Mappings endpoint successful
 | elasticsearch_indices_merges_docs_total | counter | 1 | Cumulative docs merged
 | elasticsearch_indices_merges_total | counter | 1 | Total merges
 | elasticsearch_indices_merges_total_size_bytes_total | counter | 1 | Total merge size in bytes
@@ -246,7 +246,7 @@ Further Information
 
 We provide examples for [Prometheus](http://prometheus.io) [alerts and recording rules](examples/prometheus/elasticsearch.rules) as well as an [Grafana](http://www.grafana.org) [Dashboard](examples/grafana/dashboard.json) and a [Kubernetes](http://kubernetes.io) [Deployment](examples/kubernetes/deployment.yml).
 
-The example dashboard needs the [node_exporter](https://github.com/prometheus/node_exporter) installed. In order to select the nodes that belong to the ElasticSearch cluster, we rely on a label `cluster`.
+The example dashboard needs the [node_exporter](https://github.com/prometheus/node_exporter) installed. In order to select the nodes that belong to the Elasticsearch cluster, we rely on a label `cluster`.
 Depending on your setup, it can derived from the platform metadata:
 
 For example on [GCE](https://cloud.google.com)
diff --git a/collector/cluster_health.go b/collector/cluster_health.go
index 3b991818..3e37198f 100644
--- a/collector/cluster_health.go
+++ b/collector/cluster_health.go
@@ -68,11 +68,11 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, subsystem, "up"),
-			Help: "Was the last scrape of the ElasticSearch cluster health endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch cluster health endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, subsystem, "total_scrapes"),
-			Help: "Current total ElasticSearch cluster health scrapes.",
+			Help: "Current total Elasticsearch cluster health scrapes.",
 		}),
 		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, subsystem, "json_parse_failures"),
diff --git a/collector/cluster_settings.go b/collector/cluster_settings.go
index 034a5fc5..a0b09bcd 100644
--- a/collector/cluster_settings.go
+++ b/collector/cluster_settings.go
@@ -49,11 +49,11 @@ func NewClusterSettings(logger log.Logger, client *http.Client, url *url.URL) *C
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "up"),
-			Help: "Was the last scrape of the ElasticSearch cluster settings endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch cluster settings endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "total_scrapes"),
-			Help: "Current total ElasticSearch cluster settings scrapes.",
+			Help: "Current total Elasticsearch cluster settings scrapes.",
 		}),
 		shardAllocationEnabled: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "shard_allocation_enabled"),
diff --git a/collector/indices.go b/collector/indices.go
index 17113dd7..85413125 100644
--- a/collector/indices.go
+++ b/collector/indices.go
@@ -131,11 +131,11 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "index_stats", "up"),
-			Help: "Was the last scrape of the ElasticSearch index endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch index endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "index_stats", "total_scrapes"),
-			Help: "Current total ElasticSearch index scrapes.",
+			Help: "Current total Elasticsearch index scrapes.",
 		}),
 		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "index_stats", "json_parse_failures"),
diff --git a/collector/indices_mappings.go b/collector/indices_mappings.go
index 38698bf3..689d12da 100644
--- a/collector/indices_mappings.go
+++ b/collector/indices_mappings.go
@@ -59,11 +59,11 @@ func NewIndicesMappings(logger log.Logger, client *http.Client, url *url.URL) *I
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, subsystem, "up"),
-			Help: "Was the last scrape of the ElasticSearch Indices Mappings endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch Indices Mappings endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, subsystem, "scrapes_total"),
-			Help: "Current total ElasticSearch Indices Mappings scrapes.",
+			Help: "Current total Elasticsearch Indices Mappings scrapes.",
 		}),
 		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, subsystem, "json_parse_failures_total"),
diff --git a/collector/indices_settings.go b/collector/indices_settings.go
index 12fd0485..5b6b55a1 100644
--- a/collector/indices_settings.go
+++ b/collector/indices_settings.go
@@ -60,11 +60,11 @@ func NewIndicesSettings(logger log.Logger, client *http.Client, url *url.URL) *I
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "up"),
-			Help: "Was the last scrape of the ElasticSearch Indices Settings endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch Indices Settings endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "total_scrapes"),
-			Help: "Current total ElasticSearch Indices Settings scrapes.",
+			Help: "Current total Elasticsearch Indices Settings scrapes.",
 		}),
 		readOnlyIndices: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "read_only_indices"),
diff --git a/collector/nodes.go b/collector/nodes.go
index 1a895b7c..ba93a9b6 100644
--- a/collector/nodes.go
+++ b/collector/nodes.go
@@ -193,11 +193,11 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "node_stats", "up"),
-			Help: "Was the last scrape of the ElasticSearch nodes endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch nodes endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "node_stats", "total_scrapes"),
-			Help: "Current total ElasticSearch node scrapes.",
+			Help: "Current total Elasticsearch node scrapes.",
 		}),
 		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "node_stats", "json_parse_failures"),
diff --git a/collector/slm.go b/collector/slm.go
index 84465517..a8533233 100644
--- a/collector/slm.go
+++ b/collector/slm.go
@@ -78,11 +78,11 @@ func NewSLM(logger log.Logger, client *http.Client, url *url.URL) *SLM {
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "slm_stats", "up"),
-			Help: "Was the last scrape of the ElasticSearch SLM endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch SLM endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "slm_stats", "total_scrapes"),
-			Help: "Current total ElasticSearch SLM scrapes.",
+			Help: "Current total Elasticsearch SLM scrapes.",
 		}),
 		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "slm_stats", "json_parse_failures"),
diff --git a/collector/snapshots.go b/collector/snapshots.go
index 90d3e25d..214096d0 100644
--- a/collector/snapshots.go
+++ b/collector/snapshots.go
@@ -73,11 +73,11 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 		up: prometheus.NewGauge(prometheus.GaugeOpts{
 			Name: prometheus.BuildFQName(namespace, "snapshot_stats", "up"),
-			Help: "Was the last scrape of the ElasticSearch snapshots endpoint successful.",
+			Help: "Was the last scrape of the Elasticsearch snapshots endpoint successful.",
 		}),
 		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "snapshot_stats", "total_scrapes"),
-			Help: "Current total ElasticSearch snapshots scrapes.",
+			Help: "Current total Elasticsearch snapshots scrapes.",
 		}),
 		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
 			Name: prometheus.BuildFQName(namespace, "snapshot_stats", "json_parse_failures"),
diff --git a/examples/grafana/dashboard.json b/examples/grafana/dashboard.json
index 66f2ab1a..6b1de5a8 100644
--- a/examples/grafana/dashboard.json
+++ b/examples/grafana/dashboard.json
@@ -2244,7 +2244,7 @@
     ]
   },
   "timezone": "utc",
-  "title": "ElasticSearch",
+  "title": "Elasticsearch",
   "uid": "n_nxrE_mk",
   "version": 2
 }
diff --git a/examples/prometheus/elasticsearch.rules b/examples/prometheus/elasticsearch.rules
index a7922357..05109067 100644
--- a/examples/prometheus/elasticsearch.rules
+++ b/examples/prometheus/elasticsearch.rules
@@ -7,11 +7,11 @@ ALERT ElasticsearchTooFewNodesRunning
   IF elasticsearch_cluster_health_number_of_nodes < 3
   FOR 5m
   LABELS {severity="critical"}
-  ANNOTATIONS {description="There are only {{$value}} < 3 ElasticSearch nodes running", summary="ElasticSearch running on less than 3 nodes"}
+  ANNOTATIONS {description="There are only {{$value}} < 3 Elasticsearch nodes running", summary="Elasticsearch running on less than 3 nodes"}
 
 # alert if heap usage is over 90%
 ALERT ElasticsearchHeapTooHigh
   IF elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"} > 0.9
   FOR 15m
   LABELS {severity="critical"}
-  ANNOTATIONS {description="The heap usage is over 90% for 15m", summary="ElasticSearch node {{$labels.node}} heap usage is high"}
+  ANNOTATIONS {description="The heap usage is over 90% for 15m", summary="Elasticsearch node {{$labels.node}} heap usage is high"}
diff --git a/examples/prometheus/elasticsearch.rules.yml b/examples/prometheus/elasticsearch.rules.yml
index 308048c3..d29f0b75 100644
--- a/examples/prometheus/elasticsearch.rules.yml
+++ b/examples/prometheus/elasticsearch.rules.yml
@@ -12,8 +12,8 @@ groups:
     labels:
       severity: critical
    annotations:
-      description: There are only {{$value}} < 3 ElasticSearch nodes running
-      summary: ElasticSearch running on less than 3 nodes
+      description: There are only {{$value}} < 3 Elasticsearch nodes running
+      summary: Elasticsearch running on less than 3 nodes
 
   - alert: ElasticsearchHeapTooHigh
     expr: elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"} > 0.9
@@ -22,4 +22,4 @@ groups:
       severity: critical
     annotations:
      description: The heap usage is over 90% for 15m
-      summary: ElasticSearch node {{$labels.node}} heap usage is high
+      summary: Elasticsearch node {{$labels.node}} heap usage is high
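Every `Help` string touched above belongs to the self-monitoring metrics (an `*_up` gauge, a scrape counter, and a JSON-parse-failure counter) that each collector builds with `prometheus.BuildFQName`; only this help text, the dashboard title, and the alert annotation wording change, so metric names and queries are unaffected. For context, a minimal sketch of that collector pattern with `prometheus/client_golang` follows. It is illustrative only and not part of the patch: the `ExampleCollector` type and the `example_stats` subsystem are made-up names.

```go
package collector

import "github.com/prometheus/client_golang/prometheus"

// ExampleCollector is a hypothetical collector showing the self-metric
// pattern shared by the collectors touched in this diff.
type ExampleCollector struct {
	up                prometheus.Gauge
	totalScrapes      prometheus.Counter
	jsonParseFailures prometheus.Counter
}

// NewExampleCollector wires up the self-metrics the same way the real
// constructors do, under an assumed "example_stats" subsystem.
func NewExampleCollector() *ExampleCollector {
	return &ExampleCollector{
		up: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: prometheus.BuildFQName("elasticsearch", "example_stats", "up"),
			Help: "Was the last scrape of the Elasticsearch example endpoint successful.",
		}),
		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
			Name: prometheus.BuildFQName("elasticsearch", "example_stats", "total_scrapes"),
			Help: "Current total Elasticsearch example scrapes.",
		}),
		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
			Name: prometheus.BuildFQName("elasticsearch", "example_stats", "json_parse_failures"),
			Help: "Number of errors while parsing JSON.",
		}),
	}
}

// Describe implements prometheus.Collector.
func (c *ExampleCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.up.Desc()
	ch <- c.totalScrapes.Desc()
	ch <- c.jsonParseFailures.Desc()
}

// Collect implements prometheus.Collector.
func (c *ExampleCollector) Collect(ch chan<- prometheus.Metric) {
	c.totalScrapes.Inc()
	// A real collector fetches and decodes an Elasticsearch API response here,
	// setting up to 0 and incrementing jsonParseFailures on error.
	c.up.Set(1)

	ch <- c.up
	ch <- c.totalScrapes
	ch <- c.jsonParseFailures
}
```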