@@ -16,16 +16,17 @@ package collector
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
-	"github.com/prometheus-community/elasticsearch_exporter/pkg/clusterinfo"
-	"github.com/prometheus/client_golang/prometheus"
 	"io"
 	"net/http"
 	"net/url"
 	"path"
 	"sort"
 	"strconv"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus-community/elasticsearch_exporter/pkg/clusterinfo"
+	"github.com/prometheus/client_golang/prometheus"
 )

 type labels struct {
@@ -64,10 +65,6 @@ type Indices struct {
 	clusterInfoCh   chan *clusterinfo.Response
 	lastClusterInfo *clusterinfo.Response

-	up                prometheus.Gauge
-	totalScrapes      prometheus.Counter
-	jsonParseFailures prometheus.Counter
-
 	indexMetrics []*indexMetric
 	shardMetrics []*shardMetric
 	aliasMetrics []*aliasMetric
@@ -129,19 +126,6 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo
 			ClusterName: "unknown_cluster",
 		},

-		up: prometheus.NewGauge(prometheus.GaugeOpts{
-			Name: prometheus.BuildFQName(namespace, "index_stats", "up"),
-			Help: "Was the last scrape of the Elasticsearch index endpoint successful.",
-		}),
-		totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
-			Name: prometheus.BuildFQName(namespace, "index_stats", "total_scrapes"),
-			Help: "Current total Elasticsearch index scrapes.",
-		}),
-		jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{
-			Name: prometheus.BuildFQName(namespace, "index_stats", "json_parse_failures"),
-			Help: "Number of errors while parsing JSON.",
-		}),
-
 		indexMetrics: []*indexMetric{
 			{
 				Type: prometheus.GaugeValue,
@@ -1117,9 +1101,10 @@ func (i *Indices) Describe(ch chan<- *prometheus.Desc) {
 	for _, metric := range i.indexMetrics {
 		ch <- metric.Desc
 	}
-	ch <- i.up.Desc()
-	ch <- i.totalScrapes.Desc()
-	ch <- i.jsonParseFailures.Desc()
+	for _, metric := range i.aliasMetrics {
+		ch <- metric.Desc
+	}
+
 }

 func (i *Indices) fetchAndDecodeIndexStats() (indexStatsResponse, error) {
@@ -1139,7 +1124,6 @@ func (i *Indices) fetchAndDecodeIndexStats() (indexStatsResponse, error) {
 	}

 	if err := json.Unmarshal(bts, &isr); err != nil {
-		i.jsonParseFailures.Inc()
 		return isr, err
 	}

@@ -1179,7 +1163,6 @@ func (i *Indices) fetchAndDecodeAliases() (aliasesResponse, error) {
 	}

 	if err := json.Unmarshal(bts, &asr); err != nil {
-		i.jsonParseFailures.Inc()
 		return asr, err
 	}

@@ -1217,24 +1200,15 @@ func (i *Indices) queryURL(u *url.URL) ([]byte, error) {

 // Collect gets Indices metric values
 func (i *Indices) Collect(ch chan<- prometheus.Metric) {
-	i.totalScrapes.Inc()
-	defer func() {
-		ch <- i.up
-		ch <- i.totalScrapes
-		ch <- i.jsonParseFailures
-	}()
-
 	// indices
 	indexStatsResp, err := i.fetchAndDecodeIndexStats()
 	if err != nil {
-		i.up.Set(0)
 		level.Warn(i.logger).Log(
 			"msg", "failed to fetch and decode index stats",
 			"err", err,
 		)
 		return
 	}
-	i.up.Set(1)

 	// Alias stats
 	if i.aliases {
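
The hunks above drop the per-collector up, totalScrapes, and jsonParseFailures instruments from the Indices collector. For reference only, and not taken from this PR, the sketch below shows one way scrape health of this kind can be reported by a thin wrapper around the scrape call instead of by fields on every collector; the scrapeHealth type, the runScrape hook, the literal "elasticsearch" namespace, and the listen address are all hypothetical names chosen for the example.

// Minimal sketch, not part of this PR: emit an "up"-style gauge from a wrapper
// around a scrape callback, so collectors such as Indices do not need their
// own up/totalScrapes/jsonParseFailures fields.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Same metric identity as the gauge removed above (namespace literal assumed).
var scrapeUpDesc = prometheus.NewDesc(
	prometheus.BuildFQName("elasticsearch", "index_stats", "up"),
	"Was the last scrape of the Elasticsearch index endpoint successful.",
	nil, nil,
)

// scrapeHealth turns the error result of one scrape into a 0/1 gauge.
type scrapeHealth struct {
	runScrape func() error // hypothetical hook, e.g. a fetch-and-decode call that may fail
}

func (s scrapeHealth) Describe(ch chan<- *prometheus.Desc) {
	ch <- scrapeUpDesc
}

func (s scrapeHealth) Collect(ch chan<- prometheus.Metric) {
	up := 1.0
	if err := s.runScrape(); err != nil {
		up = 0
	}
	ch <- prometheus.MustNewConstMetric(scrapeUpDesc, prometheus.GaugeValue, up)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(scrapeHealth{runScrape: func() error { return nil }})
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9114", nil))
}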