Skip to content

Commit 7a09a84

Browse files
authored
chore!: adopt log/slog, drop go-kit/log (#942)
The bulk of this change set was automated by the following script which is being used to aid in converting the various exporters/projects to use slog: https://gist.github.com/tjhop/49f96fb7ebbe55b12deee0b0312d8434 In addition to the parts that were straightforward conversions, this also: - refactors much of the logging config to adopt slog - removed custom `logger.go` setup for go-kit - adopt promslog/flag and use that to handle parsing log level/format flags - for consistent behavior, keep log output flag to allow toggle stdout/stderr for output - adopt promslog for logger setup - enables sloglint in golangci-lint config - drops go-kit/log exclusions from lint config - tidies mods to drop go-kit/log and go-logfmt/logfmt deps Signed-off-by: TJ Hoplock <[email protected]> * ci: update deprecated golangci-lint config Fixes: ``` WARN [config_reader] The configuration option `linters.errcheck.exclude` is deprecated, please use `linters.errcheck.exclude-functions`. ``` Signed-off-by: TJ Hoplock <[email protected]> --------- Signed-off-by: TJ Hoplock <[email protected]>
1 parent ccd458b commit 7a09a84

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

41 files changed

+268
-359
lines changed

.circleci/config.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ executors:
66
# This must match .promu.yml.
77
golang:
88
docker:
9-
- image: cimg/go:1.22.6
9+
- image: cimg/go:1.23
1010
jobs:
1111
test:
1212
executor: golang

.golangci.yml

+2-1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
linters:
33
enable:
44
- revive
5+
- sloglint
56

67
issues:
78
exclude-rules:
@@ -11,7 +12,7 @@ issues:
1112

1213
linters-settings:
1314
errcheck:
14-
exclude: scripts/errcheck_excludes.txt
15+
exclude-functions: scripts/errcheck_excludes.txt
1516
revive:
1617
rules:
1718
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter

.promu.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
go:
22
# This must match .circle/config.yml.
3-
version: 1.22
3+
version: 1.23
44
repository:
55
path: github.com/prometheus-community/elasticsearch_exporter
66
build:

collector/cluster_health.go

+7-8
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,11 @@ import (
1717
"encoding/json"
1818
"fmt"
1919
"io"
20+
"log/slog"
2021
"net/http"
2122
"net/url"
2223
"path"
2324

24-
"github.com/go-kit/log"
25-
"github.com/go-kit/log/level"
2625
"github.com/prometheus/client_golang/prometheus"
2726
)
2827

@@ -46,7 +45,7 @@ type clusterHealthStatusMetric struct {
4645

4746
// ClusterHealth type defines the collector struct
4847
type ClusterHealth struct {
49-
logger log.Logger
48+
logger *slog.Logger
5049
client *http.Client
5150
url *url.URL
5251

@@ -55,7 +54,7 @@ type ClusterHealth struct {
5554
}
5655

5756
// NewClusterHealth returns a new Collector exposing ClusterHealth stats.
58-
func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *ClusterHealth {
57+
func NewClusterHealth(logger *slog.Logger, client *http.Client, url *url.URL) *ClusterHealth {
5958
subsystem := "cluster_health"
6059

6160
return &ClusterHealth{
@@ -225,8 +224,8 @@ func (c *ClusterHealth) fetchAndDecodeClusterHealth() (clusterHealthResponse, er
225224
defer func() {
226225
err = res.Body.Close()
227226
if err != nil {
228-
level.Warn(c.logger).Log(
229-
"msg", "failed to close http.Client",
227+
c.logger.Warn(
228+
"failed to close http.Client",
230229
"err", err,
231230
)
232231
}
@@ -252,8 +251,8 @@ func (c *ClusterHealth) fetchAndDecodeClusterHealth() (clusterHealthResponse, er
252251
func (c *ClusterHealth) Collect(ch chan<- prometheus.Metric) {
253252
clusterHealthResp, err := c.fetchAndDecodeClusterHealth()
254253
if err != nil {
255-
level.Warn(c.logger).Log(
256-
"msg", "failed to fetch and decode cluster health",
254+
c.logger.Warn(
255+
"failed to fetch and decode cluster health",
257256
"err", err,
258257
)
259258
return

collector/cluster_health_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ import (
2222
"strings"
2323
"testing"
2424

25-
"github.com/go-kit/log"
2625
"github.com/prometheus/client_golang/prometheus/testutil"
26+
"github.com/prometheus/common/promslog"
2727
)
2828

2929
func TestClusterHealth(t *testing.T) {
@@ -189,7 +189,7 @@ func TestClusterHealth(t *testing.T) {
189189
t.Fatal(err)
190190
}
191191

192-
c := NewClusterHealth(log.NewNopLogger(), http.DefaultClient, u)
192+
c := NewClusterHealth(promslog.NewNopLogger(), http.DefaultClient, u)
193193
if err != nil {
194194
t.Fatal(err)
195195
}

collector/cluster_info.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@ import (
1717
"context"
1818
"encoding/json"
1919
"io"
20+
"log/slog"
2021
"net/http"
2122
"net/url"
2223

2324
"github.com/blang/semver/v4"
24-
"github.com/go-kit/log"
2525
"github.com/prometheus/client_golang/prometheus"
2626
)
2727

@@ -30,12 +30,12 @@ func init() {
3030
}
3131

3232
type ClusterInfoCollector struct {
33-
logger log.Logger
33+
logger *slog.Logger
3434
u *url.URL
3535
hc *http.Client
3636
}
3737

38-
func NewClusterInfo(logger log.Logger, u *url.URL, hc *http.Client) (Collector, error) {
38+
func NewClusterInfo(logger *slog.Logger, u *url.URL, hc *http.Client) (Collector, error) {
3939
return &ClusterInfoCollector{
4040
logger: logger,
4141
u: u,

collector/cluster_info_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ import (
2222
"strings"
2323
"testing"
2424

25-
"github.com/go-kit/log"
2625
"github.com/prometheus/client_golang/prometheus/testutil"
26+
"github.com/prometheus/common/promslog"
2727
)
2828

2929
func TestClusterInfo(t *testing.T) {
@@ -80,7 +80,7 @@ func TestClusterInfo(t *testing.T) {
8080
t.Fatal(err)
8181
}
8282

83-
c, err := NewClusterInfo(log.NewNopLogger(), u, http.DefaultClient)
83+
c, err := NewClusterInfo(promslog.NewNopLogger(), u, http.DefaultClient)
8484
if err != nil {
8585
t.Fatal(err)
8686
}

collector/cluster_settings.go

+9-10
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,12 @@ import (
1818
"encoding/json"
1919
"fmt"
2020
"io"
21+
"log/slog"
2122
"net/http"
2223
"net/url"
2324
"strconv"
2425
"strings"
2526

26-
"github.com/go-kit/log"
27-
"github.com/go-kit/log/level"
2827
"github.com/imdario/mergo"
2928
"github.com/prometheus/client_golang/prometheus"
3029
)
@@ -34,12 +33,12 @@ func init() {
3433
}
3534

3635
type ClusterSettingsCollector struct {
37-
logger log.Logger
36+
logger *slog.Logger
3837
u *url.URL
3938
hc *http.Client
4039
}
4140

42-
func NewClusterSettings(logger log.Logger, u *url.URL, hc *http.Client) (Collector, error) {
41+
func NewClusterSettings(logger *slog.Logger, u *url.URL, hc *http.Client) (Collector, error) {
4342
return &ClusterSettingsCollector{
4443
logger: logger,
4544
u: u,
@@ -226,7 +225,7 @@ func (c *ClusterSettingsCollector) Update(ctx context.Context, ch chan<- prometh
226225
if strings.HasSuffix(merged.Cluster.Routing.Allocation.Disk.Watermark.High, "b") {
227226
flooodStageBytes, err := getValueInBytes(merged.Cluster.Routing.Allocation.Disk.Watermark.FloodStage)
228227
if err != nil {
229-
level.Error(c.logger).Log("msg", "failed to parse flood_stage bytes", "err", err)
228+
c.logger.Error("failed to parse flood_stage bytes", "err", err)
230229
} else {
231230
ch <- prometheus.MustNewConstMetric(
232231
clusterSettingsDesc["floodStageBytes"],
@@ -237,7 +236,7 @@ func (c *ClusterSettingsCollector) Update(ctx context.Context, ch chan<- prometh
237236

238237
highBytes, err := getValueInBytes(merged.Cluster.Routing.Allocation.Disk.Watermark.High)
239238
if err != nil {
240-
level.Error(c.logger).Log("msg", "failed to parse high bytes", "err", err)
239+
c.logger.Error("failed to parse high bytes", "err", err)
241240
} else {
242241
ch <- prometheus.MustNewConstMetric(
243242
clusterSettingsDesc["highBytes"],
@@ -248,7 +247,7 @@ func (c *ClusterSettingsCollector) Update(ctx context.Context, ch chan<- prometh
248247

249248
lowBytes, err := getValueInBytes(merged.Cluster.Routing.Allocation.Disk.Watermark.Low)
250249
if err != nil {
251-
level.Error(c.logger).Log("msg", "failed to parse low bytes", "err", err)
250+
c.logger.Error("failed to parse low bytes", "err", err)
252251
} else {
253252
ch <- prometheus.MustNewConstMetric(
254253
clusterSettingsDesc["lowBytes"],
@@ -263,7 +262,7 @@ func (c *ClusterSettingsCollector) Update(ctx context.Context, ch chan<- prometh
263262
// Watermark ratio metrics
264263
floodRatio, err := getValueAsRatio(merged.Cluster.Routing.Allocation.Disk.Watermark.FloodStage)
265264
if err != nil {
266-
level.Error(c.logger).Log("msg", "failed to parse flood_stage ratio", "err", err)
265+
c.logger.Error("failed to parse flood_stage ratio", "err", err)
267266
} else {
268267
ch <- prometheus.MustNewConstMetric(
269268
clusterSettingsDesc["floodStageRatio"],
@@ -274,7 +273,7 @@ func (c *ClusterSettingsCollector) Update(ctx context.Context, ch chan<- prometh
274273

275274
highRatio, err := getValueAsRatio(merged.Cluster.Routing.Allocation.Disk.Watermark.High)
276275
if err != nil {
277-
level.Error(c.logger).Log("msg", "failed to parse high ratio", "err", err)
276+
c.logger.Error("failed to parse high ratio", "err", err)
278277
} else {
279278
ch <- prometheus.MustNewConstMetric(
280279
clusterSettingsDesc["highRatio"],
@@ -285,7 +284,7 @@ func (c *ClusterSettingsCollector) Update(ctx context.Context, ch chan<- prometh
285284

286285
lowRatio, err := getValueAsRatio(merged.Cluster.Routing.Allocation.Disk.Watermark.Low)
287286
if err != nil {
288-
level.Error(c.logger).Log("msg", "failed to parse low ratio", "err", err)
287+
c.logger.Error("failed to parse low ratio", "err", err)
289288
} else {
290289
ch <- prometheus.MustNewConstMetric(
291290
clusterSettingsDesc["lowRatio"],

collector/cluster_settings_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ import (
2222
"strings"
2323
"testing"
2424

25-
"github.com/go-kit/log"
2625
"github.com/prometheus/client_golang/prometheus/testutil"
26+
"github.com/prometheus/common/promslog"
2727
)
2828

2929
func TestClusterSettingsStats(t *testing.T) {
@@ -136,7 +136,7 @@ elasticsearch_clustersettings_allocation_watermark_low_bytes 5.24288e+07
136136
t.Fatal(err)
137137
}
138138

139-
c, err := NewClusterSettings(log.NewNopLogger(), u, http.DefaultClient)
139+
c, err := NewClusterSettings(promslog.NewNopLogger(), u, http.DefaultClient)
140140
if err != nil {
141141
t.Fatal(err)
142142
}

collector/collector.go

+9-10
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,13 @@ import (
1818
"context"
1919
"errors"
2020
"fmt"
21+
"log/slog"
2122
"net/http"
2223
"net/url"
2324
"sync"
2425
"time"
2526

2627
"github.com/alecthomas/kingpin/v2"
27-
"github.com/go-kit/log"
28-
"github.com/go-kit/log/level"
2928
"github.com/prometheus/client_golang/prometheus"
3029
)
3130

@@ -37,7 +36,7 @@ const (
3736
defaultDisabled = false
3837
)
3938

40-
type factoryFunc func(logger log.Logger, u *url.URL, hc *http.Client) (Collector, error)
39+
type factoryFunc func(logger *slog.Logger, u *url.URL, hc *http.Client) (Collector, error)
4140

4241
var (
4342
factories = make(map[string]factoryFunc)
@@ -90,15 +89,15 @@ func registerCollector(name string, isDefaultEnabled bool, createFunc factoryFun
9089

9190
type ElasticsearchCollector struct {
9291
Collectors map[string]Collector
93-
logger log.Logger
92+
logger *slog.Logger
9493
esURL *url.URL
9594
httpClient *http.Client
9695
}
9796

9897
type Option func(*ElasticsearchCollector) error
9998

10099
// NewElasticsearchCollector creates a new ElasticsearchCollector
101-
func NewElasticsearchCollector(logger log.Logger, filters []string, options ...Option) (*ElasticsearchCollector, error) {
100+
func NewElasticsearchCollector(logger *slog.Logger, filters []string, options ...Option) (*ElasticsearchCollector, error) {
102101
e := &ElasticsearchCollector{logger: logger}
103102
// Apply options to customize the collector
104103
for _, o := range options {
@@ -128,7 +127,7 @@ func NewElasticsearchCollector(logger log.Logger, filters []string, options ...O
128127
if collector, ok := initiatedCollectors[key]; ok {
129128
collectors[key] = collector
130129
} else {
131-
collector, err := factories[key](log.With(logger, "collector", key), e.esURL, e.httpClient)
130+
collector, err := factories[key](logger.With("collector", key), e.esURL, e.httpClient)
132131
if err != nil {
133132
return nil, err
134133
}
@@ -176,21 +175,21 @@ func (e ElasticsearchCollector) Collect(ch chan<- prometheus.Metric) {
176175
wg.Wait()
177176
}
178177

179-
func execute(ctx context.Context, name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) {
178+
func execute(ctx context.Context, name string, c Collector, ch chan<- prometheus.Metric, logger *slog.Logger) {
180179
begin := time.Now()
181180
err := c.Update(ctx, ch)
182181
duration := time.Since(begin)
183182
var success float64
184183

185184
if err != nil {
186185
if IsNoDataError(err) {
187-
level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err)
186+
logger.Debug("collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err)
188187
} else {
189-
level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err)
188+
logger.Error("collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err)
190189
}
191190
success = 0
192191
} else {
193-
level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds())
192+
logger.Debug("collector succeeded", "name", name, "duration_seconds", duration.Seconds())
194193
success = 1
195194
}
196195
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name)

collector/data_stream.go

+7-8
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,11 @@ import (
1717
"encoding/json"
1818
"fmt"
1919
"io"
20+
"log/slog"
2021
"net/http"
2122
"net/url"
2223
"path"
2324

24-
"github.com/go-kit/log"
25-
"github.com/go-kit/log/level"
2625
"github.com/prometheus/client_golang/prometheus"
2726
)
2827

@@ -42,15 +41,15 @@ var (
4241

4342
// DataStream Information Struct
4443
type DataStream struct {
45-
logger log.Logger
44+
logger *slog.Logger
4645
client *http.Client
4746
url *url.URL
4847

4948
dataStreamMetrics []*dataStreamMetric
5049
}
5150

5251
// NewDataStream defines DataStream Prometheus metrics
53-
func NewDataStream(logger log.Logger, client *http.Client, url *url.URL) *DataStream {
52+
func NewDataStream(logger *slog.Logger, client *http.Client, url *url.URL) *DataStream {
5453
return &DataStream{
5554
logger: logger,
5655
client: client,
@@ -106,8 +105,8 @@ func (ds *DataStream) fetchAndDecodeDataStreamStats() (DataStreamStatsResponse,
106105
defer func() {
107106
err = res.Body.Close()
108107
if err != nil {
109-
level.Warn(ds.logger).Log(
110-
"msg", "failed to close http.Client",
108+
ds.logger.Warn(
109+
"failed to close http.Client",
111110
"err", err,
112111
)
113112
}
@@ -134,8 +133,8 @@ func (ds *DataStream) Collect(ch chan<- prometheus.Metric) {
134133

135134
dataStreamStatsResp, err := ds.fetchAndDecodeDataStreamStats()
136135
if err != nil {
137-
level.Warn(ds.logger).Log(
138-
"msg", "failed to fetch and decode data stream stats",
136+
ds.logger.Warn(
137+
"failed to fetch and decode data stream stats",
139138
"err", err,
140139
)
141140
return

0 commit comments

Comments (0)