diff --git a/go.mod b/go.mod index ec80b3b698..7834d99349 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/efficientgo/core v1.0.0-rc.2 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb - github.com/felixge/fgprof v0.9.4 + github.com/felixge/fgprof v0.9.5 github.com/go-kit/log v0.2.1 github.com/go-openapi/strfmt v0.23.0 github.com/go-openapi/swag v0.23.0 @@ -44,9 +44,9 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.2 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.58.0 // Prometheus maps version 2.x.y to tags v0.x.y. github.com/prometheus/prometheus v0.54.0-rc.0 github.com/segmentio/fasthash v1.0.3 @@ -55,7 +55,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b - github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 + github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.15 @@ -63,9 +63,9 @@ require ( go.etcd.io/etcd/client/v3 v3.5.15 go.opentelemetry.io/contrib/propagators/aws v1.29.0 go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/bridge/opentracing v1.28.0 + go.opentelemetry.io/otel/bridge/opentracing v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 go.opentelemetry.io/otel/sdk v1.29.0 go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/atomic v1.11.0 @@ -148,13 +148,13 @@ require ( github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.5 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -178,7 +178,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect - github.com/miekg/dns v1.1.61 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -214,10 +214,10 @@ require ( go.opentelemetry.io/collector/semconv v0.105.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect - go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 // indirect - go.opentelemetry.io/contrib/propagators/ot v1.28.0 // indirect + go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.29.0 // indirect + go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 // indirect + go.opentelemetry.io/contrib/propagators/ot v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/goleak v1.3.0 // indirect @@ -226,16 +226,16 @@ require ( go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.23.0 // indirect + golang.org/x/tools v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/api v0.188.0 // indirect google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect k8s.io/apimachinery v0.30.2 // indirect diff --git a/go.sum b/go.sum index b32d8513dd..14cf2a8c32 100644 --- a/go.sum +++ b/go.sum @@ -768,8 +768,8 @@ github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGE github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= -github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -972,8 +972,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= 
-github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -1021,8 +1021,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= @@ -1200,8 +1200,8 @@ github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvr github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -1252,8 +1252,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= 
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -1310,8 +1310,8 @@ github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrb github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1327,8 +1327,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.58.0 h1:N+N8vY4/23r6iYfD3UQZUoJPnUYAo7v6LG5XZxjZTXo= +github.com/prometheus/common v0.58.0/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -1427,8 +1427,8 @@ github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd h1:YBDmfk3k/eOY github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4= github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b h1:V06gjM1OFiJydoClwiGOMCpBWLSpxa5FZBvBc3coQg4= github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b/go.mod h1:Gtv7CJIxGyiGsT+bNDg4nOAsL/bVKLlpfOZUSLSyYfY= -github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 h1:0qjB7yYBB4LeGw+BWVrEsPMHabYgXjfh2pD2vkuRa9s= -github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647/go.mod h1:4QL7wA5z+Uh4tE6fm4Ar+nqQKgAxWzdOWdcBBjABUvo= +github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0 h1:tUAKZQYn34cqQqo9PJqmvxIbcPpfps2Biul+w1sAsOg= +github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0/go.mod h1:h7Nq2a5HXu76HcYg3Ht3JeUUIDDhU7hA9tqyBZKuGuA= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= 
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -1485,24 +1485,24 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 h1:4zaVLcJ5mvYw0vlk63TX62qS4qty/4jAY1BKZ1usu18= -go.opentelemetry.io/contrib/propagators/autoprop v0.53.0/go.mod h1:RPlvYtxp5D8PKnRzyPM+rwMQrvzdlfA49Sgworkg7aQ= +go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 h1:h/O1OcNbqrFilsMKfG6MJWWpx8gzCDfn9D+1W7lU3lE= +go.opentelemetry.io/contrib/propagators/autoprop v0.54.0/go.mod h1:VIaPlErTgbng1UhrMA4N6Yy+f94PLA/qRPOCMATdoCs= go.opentelemetry.io/contrib/propagators/aws v1.29.0 h1:mqadbdNBhn/MVOcNx0dEZAaOaomKKdnsM0QNBmFegiI= go.opentelemetry.io/contrib/propagators/aws v1.29.0/go.mod h1:3RCUqtGbLbVr6REZv3pQbtqql9GNEpvyB7GiTJhP/nk= -go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= -go.opentelemetry.io/contrib/propagators/b3 v1.28.0/go.mod h1:DWRkzJONLquRz7OJPh2rRbZ7MugQj62rk7g6HRnEqh0= -go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 h1:xQ3ktSVS128JWIaN1DiPGIjcH+GsvkibIAVRWFjS9eM= -go.opentelemetry.io/contrib/propagators/jaeger v1.28.0/go.mod h1:O9HIyI2kVBrFoEwQZ0IN6PHXykGoit4mZV2aEjkTRH4= -go.opentelemetry.io/contrib/propagators/ot v1.28.0 h1:rmlG+2pc5k5M7Y7izDrxAHZUIwDERdGMTD9oMV7llMk= -go.opentelemetry.io/contrib/propagators/ot v1.28.0/go.mod h1:MNgXIn+UrMbNGpd7xyckyo2LCHIgCdmdjEE7YNZGG+w= +go.opentelemetry.io/contrib/propagators/b3 v1.29.0 h1:hNjyoRsAACnhoOLWupItUjABzeYmX3GTTZLzwJluJlk= +go.opentelemetry.io/contrib/propagators/b3 v1.29.0/go.mod h1:E76MTitU1Niwo5NSN+mVxkyLu4h4h7Dp/yh38F2WuIU= +go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 h1:+YPiqF5rR6PqHBlmEFLPumbSP0gY0WmCGFayXRcCLvs= +go.opentelemetry.io/contrib/propagators/jaeger v1.29.0/go.mod h1:6PD7q7qquWSp3Z4HeM3e/2ipRubaY1rXZO8NIHVDZjs= +go.opentelemetry.io/contrib/propagators/ot v1.29.0 h1:CaJU78FvXrA6ajjp1dOdcABBEjh529+hl396RTqc2LQ= +go.opentelemetry.io/contrib/propagators/ot v1.29.0/go.mod h1:Sc0omwLb4eptUhwOAfYXfmPmErHPu2HV6vkeDge/3sY= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/bridge/opentracing v1.28.0 h1:erHvOxIUFnSXj/HuS5SqaKe2CbWSBskONXm2bEBxYgc= -go.opentelemetry.io/otel/bridge/opentracing v1.28.0/go.mod h1:ZMOFThPtIKYiVqzKrU53s41j25Cj27KySyu5Az5jRPU= +go.opentelemetry.io/otel/bridge/opentracing v1.29.0 h1:qrife6xWXoBwBeif0/9nVr+18Gq3+st7kT2iTTKbu5o= +go.opentelemetry.io/otel/bridge/opentracing v1.29.0/go.mod h1:9MckCOAmd8dHQS92890ShcIwkVz/0tF/WvnMUMd9mGQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= @@ -1607,8 +1607,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1705,8 +1705,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1929,8 +1929,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2149,10 +2149,10 @@ google.golang.org/genproto 
v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b h1:dSTjko30weBaMj3eERKc0ZVXW4GudCswM3m+P++ukU0= google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 908f962cf2..cf76ff735a 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -263,26 +263,12 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", - "cortex_compactor_garbage_collected_blocks_total", - "cortex_compactor_garbage_collection_duration_seconds", - "cortex_compactor_garbage_collection_failures_total", - "cortex_compactor_garbage_collection_total", - "cortex_compactor_meta_sync_consistency_delay_seconds", - "cortex_compactor_meta_sync_duration_seconds", - "cortex_compactor_meta_sync_failures_total", - "cortex_compactor_meta_syncs_total", - "cortex_compactor_group_compaction_runs_completed_total", - "cortex_compactor_group_compaction_runs_started_total", - "cortex_compactor_group_compactions_failures_total", - "cortex_compactor_group_compactions_total", - "cortex_compactor_group_vertical_compactions_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_cleaned_total", - "cortex_compactor_blocks_marked_for_deletion_total", "cortex_compactor_blocks_marked_for_no_compaction_total", + "cortex_compactor_meta_sync_consistency_delay_seconds", "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", - "cortex_compactor_block_cleanup_failed_total", )) } @@ -350,25 +336,10 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", - "cortex_compactor_garbage_collected_blocks_total", - "cortex_compactor_garbage_collection_duration_seconds", - 
"cortex_compactor_garbage_collection_failures_total", - "cortex_compactor_garbage_collection_total", - "cortex_compactor_meta_sync_consistency_delay_seconds", - "cortex_compactor_meta_sync_duration_seconds", - "cortex_compactor_meta_sync_failures_total", - "cortex_compactor_meta_syncs_total", - "cortex_compactor_group_compaction_runs_completed_total", - "cortex_compactor_group_compaction_runs_started_total", - "cortex_compactor_group_compactions_failures_total", - "cortex_compactor_group_compactions_total", - "cortex_compactor_group_vertical_compactions_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_cleaned_total", - "cortex_compactor_blocks_marked_for_deletion_total", "cortex_compactor_blocks_marked_for_no_compaction_total", - "cortex_compactor_block_cleanup_started_total", - "cortex_compactor_block_cleanup_completed_total", + "cortex_compactor_meta_sync_consistency_delay_seconds", "cortex_compactor_block_cleanup_failed_total", )) } @@ -572,7 +543,7 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { testedMetrics := []string{ "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_marked_for_deletion_total", - "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_block_cleanup_failed_total", + "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_blocks_marked_for_no_compaction_total", } assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` @@ -698,7 +669,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { testedMetrics := []string{ "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_marked_for_deletion_total", - "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_block_cleanup_failed_total", + "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_blocks_marked_for_no_compaction_total", } assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` @@ -887,9 +858,8 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) // Real shipper metrics are too variable to embed into a test. 
testedMetrics := []string{ "cortex_compactor_runs_started_total", "cortex_compactor_runs_completed_total", "cortex_compactor_runs_failed_total", - "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", "cortex_compactor_blocks_marked_for_deletion_total", - "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_block_cleanup_failed_total", - "cortex_bucket_blocks_count", "cortex_bucket_blocks_marked_for_deletion_count", "cortex_bucket_index_last_successful_update_timestamp_seconds", + "cortex_compactor_blocks_cleaned_total", "cortex_compactor_block_cleanup_failures_total", + "cortex_compactor_block_cleanup_started_total", "cortex_compactor_block_cleanup_completed_total", "cortex_compactor_blocks_marked_for_no_compaction_total", } assert.NoError(t, prom_testutil.GatherAndCompare(registry, strings.NewReader(` @@ -2081,7 +2051,6 @@ func TestCompactor_FailedWithRetriableError(t *testing.T) { # TYPE cortex_compactor_compaction_error_total counter cortex_compactor_compaction_error_total{type="retriable", user="user-1"} 2 `), - "cortex_compactor_compaction_retry_error_total", "cortex_compactor_compaction_error_total", )) } @@ -2135,7 +2104,6 @@ func TestCompactor_FailedWithHaltError(t *testing.T) { # TYPE cortex_compactor_compaction_error_total counter cortex_compactor_compaction_error_total{type="halt", user="user-1"} 1 `), - "cortex_compactor_compaction_retry_error_total", "cortex_compactor_compaction_error_total", )) } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 15ed3cad20..e5eda834ca 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -404,21 +404,25 @@ func TestDistributor_MetricsCleanup(t *testing.T) { d := dists[0] reg := regs[0] - metrics := []string{ + permanentMetrics := []string{ "cortex_distributor_received_samples_total", "cortex_distributor_received_exemplars_total", "cortex_distributor_received_metadata_total", - "cortex_distributor_deduped_samples_total", "cortex_distributor_samples_in_total", - "cortex_distributor_exemplars_in_total", - "cortex_distributor_metadata_in_total", - "cortex_distributor_non_ha_samples_received_total", - "cortex_distributor_latest_seen_sample_timestamp_seconds", "cortex_distributor_ingester_append_failures_total", "cortex_distributor_ingester_appends_total", "cortex_distributor_ingester_query_failures_total", "cortex_distributor_ingester_queries_total", } + removedMetrics := []string{ + "cortex_distributor_deduped_samples_total", + "cortex_distributor_exemplars_in_total", + "cortex_distributor_metadata_in_total", + "cortex_distributor_non_ha_samples_received_total", + "cortex_distributor_latest_seen_sample_timestamp_seconds", + } + + allMetrics := append(removedMetrics, permanentMetrics...) 
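Aside (not part of the patch): the split into `permanentMetrics` and `removedMetrics` above is needed because prometheus/client_golang v1.20 changes `testutil.GatherAndCompare` so that it returns an error when one of the requested metric names is not present in the registry at all; the adapted tests below rely on that via `require.ErrorContains`. A minimal sketch of the new behavior, assuming client_golang >= v1.20 (the empty registry and the metric name are placeholders for illustration):

```go
package example

import (
	"strings"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

func TestMissingMetricNameIsReported(t *testing.T) {
	// Nothing is registered, so the requested metric name cannot be gathered.
	reg := prometheus.NewRegistry()
	err := testutil.GatherAndCompare(reg, strings.NewReader(""), "cortex_distributor_deduped_samples_total")
	// Since client_golang v1.20 this fails instead of matching the empty expectation.
	require.ErrorContains(t, err, "expected metric name(s) not found")
}
```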
d.receivedSamples.WithLabelValues("userA", sampleMetricTypeFloat).Add(5) d.receivedSamples.WithLabelValues("userB", sampleMetricTypeFloat).Add(10) @@ -505,7 +509,7 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # TYPE cortex_distributor_ingester_query_failures_total counter cortex_distributor_ingester_query_failures_total{ingester="ingester-0"} 1 cortex_distributor_ingester_query_failures_total{ingester="ingester-1"} 1 - `), metrics...)) + `), allMetrics...)) d.cleanupInactiveUser("userA") @@ -524,18 +528,6 @@ func TestDistributor_MetricsCleanup(t *testing.T) { d.cleanStaleIngesterMetrics() require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_distributor_deduped_samples_total The total number of deduplicated samples. - # TYPE cortex_distributor_deduped_samples_total counter - - # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. - # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge - - # HELP cortex_distributor_metadata_in_total The total number of metadata the have come in to the distributor, including rejected. - # TYPE cortex_distributor_metadata_in_total counter - - # HELP cortex_distributor_non_ha_samples_received_total The total number of received samples for a user that has HA tracking turned on, but the sample didn't contain both HA labels. - # TYPE cortex_distributor_non_ha_samples_received_total counter - # HELP cortex_distributor_received_metadata_total The total number of received metadata, excluding rejected. # TYPE cortex_distributor_received_metadata_total counter cortex_distributor_received_metadata_total{user="userB"} 10 @@ -553,9 +545,6 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # TYPE cortex_distributor_received_exemplars_total counter cortex_distributor_received_exemplars_total{user="userB"} 10 - # HELP cortex_distributor_exemplars_in_total The total number of exemplars that have come in to the distributor, including rejected or deduped exemplars. - # TYPE cortex_distributor_exemplars_in_total counter - # HELP cortex_distributor_ingester_append_failures_total The total number of failed batch appends sent to ingesters. # TYPE cortex_distributor_ingester_append_failures_total counter cortex_distributor_ingester_append_failures_total{ingester="ingester-1",status="2xx",type="metadata"} 1 @@ -568,7 +557,11 @@ func TestDistributor_MetricsCleanup(t *testing.T) { # HELP cortex_distributor_ingester_query_failures_total The total number of failed queries sent to ingesters. # TYPE cortex_distributor_ingester_query_failures_total counter cortex_distributor_ingester_query_failures_total{ingester="ingester-1"} 1 - `), metrics...)) + `), permanentMetrics...)) + + err = testutil.GatherAndCompare(reg, strings.NewReader(""), removedMetrics...) + require.ErrorContains(t, err, "expected metric name(s) not found") + require.ErrorContains(t, err, strings.Join(removedMetrics, " ")) } func TestDistributor_PushIngestionRateLimiter(t *testing.T) { diff --git a/pkg/frontend/v1/frontend_test.go b/pkg/frontend/v1/frontend_test.go index 9ae8e776c7..766d54f07f 100644 --- a/pkg/frontend/v1/frontend_test.go +++ b/pkg/frontend/v1/frontend_test.go @@ -216,10 +216,7 @@ func TestFrontendMetricsCleanup(t *testing.T) { fr.cleanupInactiveUserMetrics("1") - require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_query_frontend_queue_length Number of queries in the queue. 
- # TYPE cortex_query_frontend_queue_length gauge - `), "cortex_query_frontend_queue_length")) + require.ErrorContains(t, testutil.GatherAndCompare(reg, strings.NewReader(""), "cortex_query_frontend_queue_length"), "expected metric name(s) not found") } testFrontend(t, defaultFrontendConfig(), handler, test, matchMaxConcurrency, nil, reg) diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index e574a0d81a..c631db582b 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -185,7 +185,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { # TYPE cortex_ingester_usage_per_labelset gauge cortex_ingester_usage_per_labelset{labelset="{label1=\"value1\"}",limit="max_series",user="1"} 3 cortex_ingester_usage_per_labelset{labelset="{label2=\"value2\"}",limit="max_series",user="1"} 2 - `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total")) + `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset")) // Should impose limits for _, set := range limits.LimitsPerLabelSet { @@ -653,8 +653,6 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_memory_users", "cortex_ingester_memory_series_created_total", "cortex_ingester_memory_series_removed_total", - "cortex_discarded_samples_total", - "cortex_ingester_active_series", } userID := "test" @@ -695,7 +693,7 @@ func TestIngester_Push(t *testing.T) { expectedMetadataIngested: []*cortexpb.MetricMetadata{ {MetricFamilyName: "metric_name_2", Help: "a help for metric_name_2", Unit: "", Type: cortexpb.GAUGE}, }, - additionalMetrics: []string{}, + additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"}, disableNativeHistogram: true, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -757,6 +755,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_memory_metadata_created_total", "cortex_ingester_ingested_metadata_total", "cortex_ingester_ingested_metadata_failures_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_metadata_failures_total The total number of metadata that errored on ingestion. @@ -854,6 +853,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage", "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -964,6 +964,8 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_out_of_order_samples_total", "cortex_ingester_tsdb_head_out_of_order_samples_appended_total", + "cortex_discarded_samples_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1020,6 +1022,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{"cortex_ingester_active_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. 
# TYPE cortex_ingester_ingested_samples_total counter @@ -1067,6 +1070,10 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{ + "cortex_discarded_samples_total", + "cortex_ingester_active_series", + }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1113,6 +1120,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (60 * 1000)}, {Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{"cortex_ingester_active_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1156,6 +1164,7 @@ func TestIngester_Push(t *testing.T) { expectedIngested: []cortexpb.TimeSeries{ {Labels: metricLabelAdapters, Samples: []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}}, }, + additionalMetrics: []string{"cortex_discarded_samples_total", "cortex_ingester_active_series"}, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. # TYPE cortex_ingester_ingested_samples_total counter @@ -1214,6 +1223,7 @@ func TestIngester_Push(t *testing.T) { "cortex_ingester_tsdb_exemplar_series_with_exemplars_in_storage", "cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds", "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1274,6 +1284,7 @@ func TestIngester_Push(t *testing.T) { }, additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1323,6 +1334,7 @@ func TestIngester_Push(t *testing.T) { }, additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -1373,6 +1385,7 @@ func TestIngester_Push(t *testing.T) { additionalMetrics: []string{ "cortex_ingester_tsdb_head_samples_appended_total", "cortex_ingester_tsdb_out_of_order_samples_total", + "cortex_ingester_active_series", }, expectedMetrics: ` # HELP cortex_ingester_ingested_samples_total The total number of samples ingested. @@ -4454,8 +4467,10 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { require.Equal(t, int64(1), i.TSDBState.seriesCount.Load()) - metricsToCheck := []string{memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_memory_users", "cortex_ingester_active_series", - "cortex_ingester_memory_metadata", "cortex_ingester_memory_metadata_created_total", "cortex_ingester_memory_metadata_removed_total"} + userMetrics := []string{memSeriesCreatedTotalName, memSeriesRemovedTotalName, "cortex_ingester_active_series"} + + globalMetrics := []string{"cortex_ingester_memory_users", "cortex_ingester_memory_metadata"} + metricsToCheck := append(userMetrics, globalMetrics...) 
require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. @@ -4495,24 +4510,19 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { require.Equal(t, int64(0), i.TSDBState.seriesCount.Load()) // Flushing removed all series from memory. // Verify that user has disappeared from metrics. - require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` - # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. - # TYPE cortex_ingester_memory_series_created_total counter - - # HELP cortex_ingester_memory_series_removed_total The total number of series that were removed per user. - # TYPE cortex_ingester_memory_series_removed_total counter + err = testutil.GatherAndCompare(r, strings.NewReader(""), userMetrics...) + require.ErrorContains(t, err, "expected metric name(s) not found") + require.ErrorContains(t, err, strings.Join(userMetrics, " ")) + require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` # HELP cortex_ingester_memory_users The current number of users in memory. # TYPE cortex_ingester_memory_users gauge cortex_ingester_memory_users 0 - # HELP cortex_ingester_active_series Number of currently active series per user. - # TYPE cortex_ingester_active_series gauge - # HELP cortex_ingester_memory_metadata The current number of metadata in memory. # TYPE cortex_ingester_memory_metadata gauge cortex_ingester_memory_metadata 0 - `), metricsToCheck...)) + `), "cortex_ingester_memory_users", "cortex_ingester_memory_metadata")) // Pushing another sample will recreate TSDB. pushSingleSampleWithMetadata(t, i) diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index d3efcfc112..cd6baa146a 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -699,19 +699,20 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.NoError(t, stores.InitialSync(ctx)) require.Equal(t, []string{user1, user2}, getUsersInDir(t, cfg.BucketStore.SyncDir)) - metricNames := []string{"cortex_bucket_store_block_drops_total", "cortex_bucket_store_block_loads_total", "cortex_bucket_store_blocks_loaded"} + metricNamesWithoutLoaded := []string{"cortex_bucket_store_block_drops_total", "cortex_bucket_store_block_loads_total"} + metricNames := append(metricNamesWithoutLoaded, "cortex_bucket_store_blocks_loaded") require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 0 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 2 - # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. - # TYPE cortex_bucket_store_blocks_loaded gauge - cortex_bucket_store_blocks_loaded{user="user-1"} 1 - cortex_bucket_store_blocks_loaded{user="user-2"} 1 + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 0 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. 
+ # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 2 + # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. + # TYPE cortex_bucket_store_blocks_loaded gauge + cortex_bucket_store_blocks_loaded{user="user-1"} 1 + cortex_bucket_store_blocks_loaded{user="user-2"} 1 `), metricNames...)) // Single user left in shard. @@ -720,15 +721,15 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.Equal(t, []string{user1}, getUsersInDir(t, cfg.BucketStore.SyncDir)) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 1 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 2 - # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. - # TYPE cortex_bucket_store_blocks_loaded gauge - cortex_bucket_store_blocks_loaded{user="user-1"} 1 + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 1 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 2 + # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. + # TYPE cortex_bucket_store_blocks_loaded gauge + cortex_bucket_store_blocks_loaded{user="user-1"} 1 `), metricNames...)) // No users left in this shard. @@ -737,13 +738,13 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.Equal(t, []string(nil), getUsersInDir(t, cfg.BucketStore.SyncDir)) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 2 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. - # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 2 - `), metricNames...)) + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 2 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 2 + `), metricNamesWithoutLoaded...)) // We can always get user back. sharding.users = []string{user1} @@ -751,15 +752,15 @@ func TestBucketStores_deleteLocalFilesForExcludedTenants(t *testing.T) { require.Equal(t, []string{user1}, getUsersInDir(t, cfg.BucketStore.SyncDir)) require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` - # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. - # TYPE cortex_bucket_store_block_drops_total counter - cortex_bucket_store_block_drops_total 2 - # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. 
- # TYPE cortex_bucket_store_block_loads_total counter - cortex_bucket_store_block_loads_total 3 - # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. - # TYPE cortex_bucket_store_blocks_loaded gauge - cortex_bucket_store_blocks_loaded{user="user-1"} 1 + # HELP cortex_bucket_store_block_drops_total Total number of local blocks that were dropped. + # TYPE cortex_bucket_store_block_drops_total counter + cortex_bucket_store_block_drops_total 2 + # HELP cortex_bucket_store_block_loads_total Total number of remote block loading attempts. + # TYPE cortex_bucket_store_block_loads_total counter + cortex_bucket_store_block_loads_total 3 + # HELP cortex_bucket_store_blocks_loaded Number of currently loaded blocks. + # TYPE cortex_bucket_store_blocks_loaded gauge + cortex_bucket_store_blocks_loaded{user="user-1"} 1 `), metricNames...)) } diff --git a/vendor/github.com/felixge/fgprof/fgprof.go b/vendor/github.com/felixge/fgprof/fgprof.go index e4af2e4052..459787b6f6 100644 --- a/vendor/github.com/felixge/fgprof/fgprof.go +++ b/vendor/github.com/felixge/fgprof/fgprof.go @@ -81,6 +81,11 @@ type profiler struct { selfFrame *runtime.Frame } +// nullTerminationWorkaround deals with a regression in go1.23, see: +// - https://github.com/felixge/fgprof/issues/33 +// - https://go-review.googlesource.com/c/go/+/609815 +var nullTerminationWorkaround = runtime.Version() == "go1.23.0" + // GoroutineProfile returns the stacks of all goroutines currently managed by // the scheduler. This includes both goroutines that are currently running // (On-CPU), as well as waiting (Off-CPU). @@ -107,6 +112,11 @@ func (p *profiler) GoroutineProfile() []runtime.StackRecord { // p.stacks dynamically as well, but let's not over-engineer this until we // understand those cases better. 
for { + if nullTerminationWorkaround { + for i := range p.stacks { + p.stacks[i].Stack0 = [32]uintptr{} + } + } n, ok := runtime.GoroutineProfile(p.stacks) if !ok { p.stacks = make([]runtime.StackRecord, int(float64(n)*1.1)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c8..a65d88eb86 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 5dd4e44786..2f2b342431 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. 
+func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 5682998699..01f5734191 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -93,6 +93,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,19 +101,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4f..9f50a569e9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,27 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ 
-164,12 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -201,8 +214,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e4387..60c2065ddc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. 
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) 
} @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 10ddda1427..8d5a2a4789 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -148,6 +148,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 3225 - DO bit (DNSSEC OK) * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY +* 3596 - AAAA record * 3597 - Unknown RRs * 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 1b58e8f0aa..c1bbdaae2e 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -756,36 +756,48 @@ const ( ExtendedErrorCodeNoReachableAuthority ExtendedErrorCodeNetworkError ExtendedErrorCodeInvalidData + ExtendedErrorCodeSignatureExpiredBeforeValid + ExtendedErrorCodeTooEarly + ExtendedErrorCodeUnsupportedNSEC3IterValue + ExtendedErrorCodeUnableToConformToPolicy + ExtendedErrorCodeSynthesized + ExtendedErrorCodeInvalidQueryType ) // ExtendedErrorCodeToString maps extended error info codes to a human readable // description. 
var ExtendedErrorCodeToString = map[uint16]string{ - ExtendedErrorCodeOther: "Other", - ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", - ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", - ExtendedErrorCodeStaleAnswer: "Stale Answer", - ExtendedErrorCodeForgedAnswer: "Forged Answer", - ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", - ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", - ExtendedErrorCodeSignatureExpired: "Signature Expired", - ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", - ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", - ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", - ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", - ExtendedErrorCodeNSECMissing: "NSEC Missing", - ExtendedErrorCodeCachedError: "Cached Error", - ExtendedErrorCodeNotReady: "Not Ready", - ExtendedErrorCodeBlocked: "Blocked", - ExtendedErrorCodeCensored: "Censored", - ExtendedErrorCodeFiltered: "Filtered", - ExtendedErrorCodeProhibited: "Prohibited", - ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", - ExtendedErrorCodeNotAuthoritative: "Not Authoritative", - ExtendedErrorCodeNotSupported: "Not Supported", - ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", - ExtendedErrorCodeNetworkError: "Network Error", - ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeOther: "Other", + ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", + ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", + ExtendedErrorCodeStaleAnswer: "Stale Answer", + ExtendedErrorCodeForgedAnswer: "Forged Answer", + ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", + ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", + ExtendedErrorCodeSignatureExpired: "Signature Expired", + ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", + ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", + ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", + ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", + ExtendedErrorCodeNSECMissing: "NSEC Missing", + ExtendedErrorCodeCachedError: "Cached Error", + ExtendedErrorCodeNotReady: "Not Ready", + ExtendedErrorCodeBlocked: "Blocked", + ExtendedErrorCodeCensored: "Censored", + ExtendedErrorCodeFiltered: "Filtered", + ExtendedErrorCodeProhibited: "Prohibited", + ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", + ExtendedErrorCodeNotAuthoritative: "Not Authoritative", + ExtendedErrorCodeNotSupported: "Not Supported", + ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", + ExtendedErrorCodeNetworkError: "Network Error", + ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeSignatureExpiredBeforeValid: "Signature Expired Before Valid", + ExtendedErrorCodeTooEarly: "Too Early", + ExtendedErrorCodeUnsupportedNSEC3IterValue: "Unsupported NSEC3 Iterations Value", + ExtendedErrorCodeUnableToConformToPolicy: "Unable To Conform To Policy", + ExtendedErrorCodeSynthesized: "Synthesized", + ExtendedErrorCodeInvalidQueryType: "Invalid Query Type", } // StringToExtendedErrorCode is a map from human readable descriptions to diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 8e3129cbd2..7a34c14ca0 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -96,6 +96,7 @@ const ( TypeLP uint16 = 107 TypeEUI48 uint16 = 108 TypeEUI64 uint16 = 109 + TypeNXNAME uint16 = 128 TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC uint16 
= 258 @@ -294,6 +295,19 @@ func (*NULL) parse(c *zlexer, origin string) *ParseError { return &ParseError{err: "NULL records do not have a presentation format"} } +// NXNAME is a meta record. See https://www.iana.org/go/draft-ietf-dnsop-compact-denial-of-existence-04 +// Reference: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml +type NXNAME struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *NXNAME) String() string { return rr.Hdr.String() } + +func (*NXNAME) parse(c *zlexer, origin string) *ParseError { + return &ParseError{err: "NXNAME records do not have a presentation format"} +} + // CNAME RR. See RFC 1034. type CNAME struct { Hdr RR_Header diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index dc34e5902b..00c8629f27 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 58} +var Version = v{1, 1, 62} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 03029fb3eb..330c05395f 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -886,6 +886,15 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool { return true } +func (r1 *NXNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NXNAME) + if !ok { + return false + } + _ = r2 + return true +} + func (r1 *NXT) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*NXT) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 39b3bc8102..5a6cf4c6ad 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -706,6 +706,10 @@ func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress b return off, nil } +func (rr *NXNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + return off, nil +} + func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDomainName(rr.NextDomain, msg, off, compression, false) if err != nil { @@ -2266,6 +2270,13 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *NXNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + return off, nil +} + func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 2c70fc44d6..11f13ecf9c 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -60,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNULL: func() RR { return new(NULL) }, + TypeNXNAME: func() RR { return new(NXNAME) }, TypeNXT: func() RR { return new(NXT) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, @@ -146,6 +147,7 @@ var TypeToString = map[uint16]string{ TypeNSEC3: "NSEC3", TypeNSEC3PARAM: "NSEC3PARAM", TypeNULL: "NULL", + TypeNXNAME: "NXNAME", TypeNXT: "NXT", TypeNone: "None", TypeOPENPGPKEY: "OPENPGPKEY", @@ -230,6 +232,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr 
*NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *NXNAME) Header() *RR_Header { return &rr.Hdr } func (rr *NXT) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } @@ -594,6 +597,11 @@ func (rr *NULL) len(off int, compression map[string]struct{}) int { return l } +func (rr *NXNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + return l +} + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) @@ -1107,6 +1115,10 @@ func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } +func (rr *NXNAME) copy() RR { + return &NXNAME{rr.Hdr} +} + func (rr *NXT) copy() RR { return &NXT{*rr.NSEC.copy().(*NSEC)} } diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee..b9cc55abbb 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index 1cfe8d863c..cddf027fda 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -475,9 +475,9 @@ type API interface { // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers. - LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) + LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) // LabelValues performs a query for the values of the given label, time range and matchers. - LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) + LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) // Query performs a query for the given time. Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) // QueryRange performs a query for the given range. @@ -489,7 +489,7 @@ type API interface { // Runtimeinfo returns the various runtime information properties about the Prometheus server. Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) // Series finds series by label matchers. 
- Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) + Series(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]model.LabelSet, Warnings, error) // Snapshot creates a snapshot of all current data into snapshots/- // under the TSDB's data directory and returns the directory as response. Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) @@ -502,7 +502,7 @@ type API interface { // Metadata returns metadata about metrics currently scraped by the metric name. Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) // TSDB returns the cardinality statistics. - TSDB(ctx context.Context) (TSDBResult, error) + TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) // WalReplay returns the current replay status of the wal. WalReplay(ctx context.Context) (WalReplayStatus, error) } @@ -1024,9 +1024,10 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { return res, err } -func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) { +func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]string, Warnings, error) { u := h.client.URL(epLabels, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) + if !startTime.IsZero() { q.Set("start", formatTime(startTime)) } @@ -1046,9 +1047,10 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, e return labelNames, w, err } -func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) { +func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time, opts ...Option) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) + if !startTime.IsZero() { q.Set("start", formatTime(startTime)) } @@ -1076,6 +1078,7 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []strin type apiOptions struct { timeout time.Duration + limit uint64 } type Option func(c *apiOptions) @@ -1088,20 +1091,35 @@ func WithTimeout(timeout time.Duration) Option { } } -func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { - u := h.client.URL(epQuery, nil) - q := u.Query() +// WithLimit provides an optional maximum number of returned entries for APIs that support limit parameter +// e.g. 
https://prometheus.io/docs/prometheus/latest/querying/api/#instant-querie:~:text=%3A%20End%20timestamp.-,limit%3D%3Cnumber%3E,-%3A%20Maximum%20number%20of +func WithLimit(limit uint64) Option { + return func(o *apiOptions) { + o.limit = limit + } +} +func addOptionalURLParams(q url.Values, opts []Option) url.Values { opt := &apiOptions{} for _, o := range opts { o(opt) } - d := opt.timeout - if d > 0 { - q.Set("timeout", d.String()) + if opt.timeout > 0 { + q.Set("timeout", opt.timeout.String()) } + if opt.limit > 0 { + q.Set("limit", strconv.FormatUint(opt.limit, 10)) + } + + return q +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) { + u := h.client.URL(epQuery, nil) + q := addOptionalURLParams(u.Query(), opts) + q.Set("query", query) if !ts.IsZero() { q.Set("time", formatTime(ts)) @@ -1118,36 +1136,25 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts .. func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error) { u := h.client.URL(epQueryRange, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) q.Set("query", query) q.Set("start", formatTime(r.Start)) q.Set("end", formatTime(r.End)) q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64)) - opt := &apiOptions{} - for _, o := range opts { - o(opt) - } - - d := opt.timeout - if d > 0 { - q.Set("timeout", d.String()) - } - _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err } var qres queryResult - return qres.v, warnings, json.Unmarshal(body, &qres) } -func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) { +func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time, opts ...Option) ([]model.LabelSet, Warnings, error) { u := h.client.URL(epSeries, nil) - q := u.Query() + q := addOptionalURLParams(u.Query(), opts) for _, m := range matches { q.Add("match[]", m) @@ -1166,8 +1173,7 @@ func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTi } var mset []model.LabelSet - err = json.Unmarshal(body, &mset) - return mset, warnings, err + return mset, warnings, json.Unmarshal(body, &mset) } func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { @@ -1278,8 +1284,10 @@ func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[strin return res, err } -func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { +func (h *httpAPI) TSDB(ctx context.Context, opts ...Option) (TSDBResult, error) { u := h.client.URL(epTSDB, nil) + q := addOptionalURLParams(u.Query(), opts) + u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 0000000000..8547c8dfd1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. 
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 0000000000..2e45780b74 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e..cc4ef1077e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. 
go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0..520cbd7d41 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f4..5117464172 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395..8d35f2d8ae 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,15 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + // If exemplars are not configured, the cap will be 0. + // So append is not needed in this case. + if cap(h.nativeExemplars.exemplars) > 0 { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1122,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1135,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1373,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1654,142 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + ttl time.Duration + exemplars []*dto.Exemplar +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if cap(n.exemplars) == 0 { + return + } + + n.Lock() + defer n.Unlock() + + // The index where to insert the new exemplar. + var nIdx int = -1 + + // When the number of exemplars has not yet exceeded or + // is equal to cap(n.exemplars), then + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + rIdx int // The index where to remove the old exemplar. + + ot = time.Now() // Oldest timestamp seen. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + mdIdx = -1 // Index of the older exemplar within the closest pair. + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert new the exemplar. + if *e.Value <= *exemplar.Value && nIdx == -1 { + nIdx = i + } + + // Find the two closest exemplars and pick the one the with older timestamp. + pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + mdIdx = i + } else { + mdIdx = i - 1 + } + } + + } + + // If all existing exemplar are smaller than new exemplar, + // then the exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. 
+ elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + md = diff + mdIdx = nIdx + if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { + mdIdx = nIdx - 1 + } + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + mdIdx = nIdx + if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { + mdIdx = nIdx + } + } + } + rIdx = mdIdx + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d644..a4fa6eabd7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237..9d9b81ab44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed..62a4e7ad9a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func 
(c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea..14d56d2d06 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83..315eab5f17 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbea..e598e66e68 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // handler always fallbacks to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. 
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and an the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a0..c6fd2f58b7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. 
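The `negotiateEncodingWriter` helper above only covers the server side; a scraping client that wants the new zstd encoding has to request it and decompress the body itself. A hedged sketch using the same klauspost/compress package the handler uses (URL and error handling are illustrative):

```go
package scrape

import (
	"io"
	"net/http"

	"github.com/klauspost/compress/zstd"
)

// scrapeZstd fetches a metrics endpoint, asking for zstd; setting the header
// explicitly disables Go's transparent gzip handling, so we decode ourselves.
func scrapeZstd(url string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept-Encoding", "zstd")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body := io.Reader(resp.Body)
	if resp.Header.Get("Content-Encoding") == "zstd" {
		zr, err := zstd.NewReader(resp.Body)
		if err != nil {
			return nil, err
		}
		defer zr.Close()
		body = zr
	}
	return io.ReadAll(body)
}
```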
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446..1ab0e47965 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab6..e1441598da 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 0000000000..fdc1e62394 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. 
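Usage sketch for the new const-summary constructors in the summary.go hunk above, e.g. inside a custom collector; the descriptor, values and timestamp are illustrative:

```go
package collector

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDesc = prometheus.NewDesc(
	"app_request_duration_seconds", "Request latency summary.", nil, nil)

type windowCollector struct {
	created time.Time // start of the observation window (illustrative)
}

func (c *windowCollector) Describe(ch chan<- *prometheus.Desc) { ch <- requestDesc }

func (c *windowCollector) Collect(ch chan<- prometheus.Metric) {
	// Like MustNewConstSummary, plus a created timestamp that is carried
	// through to exposition formats that support it.
	ch <- prometheus.MustNewConstSummaryWithCreatedTimestamp(
		requestDesc,
		1234,  // observation count
		987.6, // sum of observations
		map[float64]float64{0.5: 0.25, 0.9: 0.8, 0.99: 1.2},
		c.created,
	)
}
```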
+func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16..de52cfee44 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9dce15eafa..e0ac346665 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -184,9 +183,8 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. It then calls GatherAndCompare with that Registry and with -// the provided metricNames. +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -222,6 +220,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. 
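The new `LintDuplicateMetric` validation above compares label sets pairwise with `reflect.DeepEqual`; a small sketch of what it flags (the dto literals are illustrative):

```go
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"

	"github.com/prometheus/client_golang/prometheus/testutil/promlint/validations"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("test_requests_total"),
		Metric: []*dto.Metric{
			{Label: []*dto.LabelPair{{Name: proto.String("path"), Value: proto.String("/a")}}},
			{Label: []*dto.LabelPair{{Name: proto.String("path"), Value: proto.String("/a")}}}, // same label set
		},
	}
	// Reports one "metric not unique" problem; the check also runs by default
	// now that it is listed in promlint's defaultValidations.
	fmt.Println(validations.LintDuplicateMetric(mf))
}
```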
+func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -254,6 +277,15 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str if metricNames != nil { got = filterMetrics(got, metricNames) expected = filterMetrics(expected, metricNames) + if len(metricNames) > len(got) { + var missingMetricNames []string + for _, name := range metricNames { + if ok := hasMetricByName(got, name); !ok { + missingMetricNames = append(missingMetricNames, name) + } + } + return fmt.Errorf("expected metric name(s) not found: %v", missingMetricNames) + } } return compare(got, expected) @@ -277,73 +309,12 @@ func compare(got, want []*dto.MetricFamily) error { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
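A test-side sketch of the new `testutil.CollectAndFormat` helper added above; the collector and metric name are placeholders:

```go
package collector

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/expfmt"
)

func TestExposition(t *testing.T) {
	c := &windowCollector{} // any prometheus.Collector under test (illustrative)

	out, err := testutil.CollectAndFormat(c, expfmt.TypeTextPlain, "app_request_duration_seconds")
	if err != nil {
		t.Fatalf("collect and format: %v", err)
	}
	t.Logf("text exposition:\n%s", out)
}
```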
-func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { @@ -356,3 +327,12 @@ func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFam } return filtered } + +func hasMetricByName(metrics []*dto.MetricFamily, name string) bool { + for _, mf := range metrics { + if mf.GetName() == name { + return true + } + } + return false +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f8..2c808eece0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 4a0be4a10e..7276742ec9 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -52,14 +52,6 @@ var reservedHeaders = map[string]struct{}{ // Headers represents the configuration for HTTP headers. type Headers struct { Headers map[string]Header `yaml:",inline"` - dir string -} - -// Header represents the configuration for a single HTTP header. -type Header struct { - Values []string `yaml:"values,omitempty" json:"values,omitempty"` - Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` - Files []string `yaml:"files,omitempty" json:"files,omitempty"` } func (h Headers) MarshalJSON() ([]byte, error) { @@ -67,32 +59,40 @@ func (h Headers) MarshalJSON() ([]byte, error) { return json.Marshal(h.Headers) } -// SetDirectory records the directory to make headers file relative to the -// configuration file. +// SetDirectory make headers file relative to the configuration file. 
func (h *Headers) SetDirectory(dir string) { if h == nil { return } - h.dir = dir + for _, h := range h.Headers { + h.SetDirectory(dir) + } } // Validate validates the Headers config. func (h *Headers) Validate() error { - for n, header := range h.Headers { + for n := range h.Headers { if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } - for _, v := range header.Files { - f := JoinDir(h.dir, v) - _, err := os.ReadFile(f) - if err != nil { - return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) - } - } } return nil } +// Header represents the configuration for a single HTTP header. +type Header struct { + Values []string `yaml:"values,omitempty" json:"values,omitempty"` + Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Files []string `yaml:"files,omitempty" json:"files,omitempty"` +} + +// SetDirectory makes headers file relative to the configuration file. +func (h *Header) SetDirectory(dir string) { + for i := range h.Files { + h.Files[i] = JoinDir(dir, h.Files[i]) + } +} + // NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on // requests as configured. func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { @@ -121,10 +121,9 @@ func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, err req.Header.Add(n, string(v)) } for _, v := range h.Files { - f := JoinDir(rt.config.dir, v) - b, err := os.ReadFile(f) + b, err := os.ReadFile(v) if err != nil { - return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + return nil, fmt.Errorf("unable to read headers file %s: %w", v, err) } req.Header.Add(n, strings.TrimSpace(string(b))) } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 3e32013477..b640b89953 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -828,7 +828,7 @@ type basicAuthRoundTripper struct { // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. 
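With the headers change above, relative file paths are rewritten once via `SetDirectory` instead of being joined against a stored directory on every request. A sketch of direct use of the prometheus/common config types; the header name and paths are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/common/config"
)

func main() {
	hdrs := &config.Headers{
		Headers: map[string]config.Header{
			"Authorization": {Files: []string{"secrets/token"}},
		},
	}
	// Relative file paths are made absolute up front; RoundTrip then reads
	// /etc/myapp/secrets/token on every request.
	hdrs.SetDirectory("/etc/myapp")
	if err := hdrs.Validate(); err != nil {
		log.Fatal(err)
	}

	client := &http.Client{Transport: config.NewHeadersRoundTripper(hdrs, http.DefaultTransport)}
	_ = client
}
```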
-func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { +func NewBasicAuthRoundTripper(username, password SecretReader, rt http.RoundTripper) http.RoundTripper { return &basicAuthRoundTripper{username, password, rt} } @@ -964,7 +964,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro } rt.mtx.Lock() - rt.lastSecret = secret + rt.lastSecret = newSecret rt.lastRT.Source = source if rt.client != nil { rt.client.CloseIdleConnections() diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa2164..1448439b7f 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d9..cf0c150c2e 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + 
escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd17..14034a673a 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. 
+ FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,12 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f9..11c8ff4b9d 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9e..4b86434b33 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
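The `Fmt*` constants above are re-exported but marked deprecated in favour of the constructor; a minimal encoder sketch using the non-deprecated path (the metric families are assumed to be gathered elsewhere):

```go
package exposition

import (
	"bytes"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// encodeOpenMetrics serializes the given families using NewFormat instead of
// referencing the deprecated FmtOpenMetrics_1_0_0 constant directly.
func encodeOpenMetrics(families []*dto.MetricFamily) ([]byte, error) {
	var buf bytes.Buffer
	enc := expfmt.NewEncoder(&buf, expfmt.NewFormat(expfmt.TypeOpenMetrics))
	for _, mf := range families {
		if err := enc.Encode(mf); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
```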
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af..25db4f2151 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,7 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + currentMetricIsInsideBraces bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +139,14 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +162,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +282,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +296,38 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMF != nil && p.currentMF.GetName() != p.currentToken.String() { + p.parseError(fmt.Sprintf("multiple metric names %s %s", p.currentMF.GetName(), p.currentToken.String())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + p.currentMetric = &dto.Metric{} + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +337,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +380,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +407,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +628,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +655,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. 
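The text_parse.go hunks above teach the parser the brace-quoted form used for UTF-8 metric and label names. A rough sketch of feeding such input to the parser; whether a given name is ultimately accepted still depends on `model.NameValidationScheme`, and the sample line is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var parser expfmt.TextParser
	// Metric name and one label name quoted inside the braces.
	families, err := parser.TextToMetricFamilies(strings.NewReader(
		"{\"http.requests.total\",\"service.name\"=\"api\"} 7\n"))
	if err != nil {
		log.Fatal(err)
	}
	for name := range families {
		fmt.Println(name) // expected: http.requests.total
	}
}
```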
+ quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +705,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +769,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +828,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go 
index 3317ce22ff..73b7aa3e60 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59..c44f93f314 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -161,7 +161,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +176,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +208,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. 
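The signature change below (`LabelValue` → `string`) and the new `LabelName.IsValidLegacy` split the legacy-only check out of the scheme-dependent `IsValid`. Call sites a downstream would touch look roughly like this:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Legacy-only checks, independent of model.NameValidationScheme:
	fmt.Println(model.IsValidLegacyMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidLegacyMetricName("http.requests.total")) // false
	fmt.Println(model.LabelName("service.name").IsValidLegacy())      // false

	// Scheme-dependent check; UTF8Validation would accept the dotted name.
	fmt.Println(model.LabelName("service.name").IsValid())
}
```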
- if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +230,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +240,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +256,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +283,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +309,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +452,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 883b8e0608..772d37a48b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -257,7 +257,11 @@ func (f *ConcurrentLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch c mu.Unlock() continue } - ch <- uid + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- uid: + } } return nil }) diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index e9fe5eb7dc..ff3975663c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -32,7 +32,7 @@ func NewReaderPoolMetrics(reg prometheus.Registerer) *ReaderPoolMetrics { } } -// ReaderPool is used to istantiate new index-header readers and keep track of them. +// ReaderPool is used to instantiate new index-header readers and keep track of them. // When the lazy reader is enabled, the pool keeps track of all instantiated readers // and automatically close them once the idle timeout is reached. A closed lazy reader // will be automatically re-opened upon next usage. 
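The fetcher hunk above swaps a bare channel send for a select so the producing goroutine unblocks when the caller cancels. The same pattern in isolation, as a generic sketch rather than Thanos-specific code:

```go
package sketch

import (
	"context"

	"github.com/oklog/ulid"
)

// sendAll forwards block IDs but gives up as soon as the context is canceled,
// instead of blocking forever on a receiver that stopped reading.
func sendAll(ctx context.Context, ch chan<- ulid.ULID, ids []ulid.ULID) error {
	for _, id := range ids {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ch <- id:
		}
	}
	return nil
}
```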
@@ -73,7 +73,7 @@ func (s IndexHeaderLazyDownloadStrategy) StrategyToDownloadFunc() LazyDownloadIn } } -// LazyDownloadIndexHeaderFunc is used to determinte whether to download the index header lazily +// LazyDownloadIndexHeaderFunc is used to determine whether to download the index header lazily // or not by checking its block metadata. Usecase can be by time or by index file size. type LazyDownloadIndexHeaderFunc func(meta *metadata.Meta) bool diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go index 8c10a9a874..a5b0c5b2a4 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go @@ -134,7 +134,7 @@ type MemcachedClientConfig struct { MaxItemSize model.Bytes `yaml:"max_item_size"` // MaxGetMultiBatchSize specifies the maximum number of keys a single underlying - // GetMulti() should run. If more keys are specified, internally keys are splitted + // GetMulti() should run. If more keys are specified, internally keys are split // into multiple batches and fetched concurrently, honoring MaxGetMultiConcurrency parallelism. // If set to 0, the max batch size is unlimited. MaxGetMultiBatchSize int `yaml:"max_get_multi_batch_size"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go index dc94938d75..69f2baf165 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go +++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go @@ -199,11 +199,18 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig return newRT(tlsConfig) } - return config_util.NewTLSRoundTripper(tlsConfig, config_util.TLSRoundTripperSettings{ - CA: config_util.NewFileSecret(cfg.TLSConfig.CAFile), - Cert: config_util.NewFileSecret(cfg.TLSConfig.CertFile), - Key: config_util.NewFileSecret(cfg.TLSConfig.KeyFile), - }, newRT) + rtConfig := config_util.TLSRoundTripperSettings{ + Cert: config_util.NewFileSecret(cfg.TLSConfig.CAFile), + } + if len(cfg.TLSConfig.CertFile) > 0 { + rtConfig.Cert = config_util.NewFileSecret(cfg.TLSConfig.CertFile) + } + + if len(cfg.TLSConfig.KeyFile) > 0 { + rtConfig.Key = config_util.NewFileSecret(cfg.TLSConfig.KeyFile) + } + + return config_util.NewTLSRoundTripper(tlsConfig, rtConfig, newRT) } // NewHTTPClient returns a new HTTP client. diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 522e4c9d4c..7f08297671 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -894,7 +894,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp _, _ = sb.WriteString(",") } } - rerr = fmt.Errorf("paniced while compacting %s: %v", sb.String(), p) + rerr = fmt.Errorf("panicked while compacting %s: %v", sb.String(), p) } }() diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 6aa2b23dfe..46d590186e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -149,7 +149,7 @@ func Downsample( // Raw and already downsampled data need different processing. 
if origMeta.Thanos.Downsample.Resolution == 0 { for _, c := range chks { - // TODO(bwplotka): We can optimze this further by using in WriteSeries iterators of each chunk instead of + // TODO(bwplotka): We can optimize this further by using in WriteSeries iterators of each chunk instead of // samples. Also ensure 120 sample limit, otherwise we have gigantic chunks. // https://github.com/thanos-io/thanos/issues/2542. if err := expandChunkIterator(c.Chunk.Iterator(reuseIt), &all); err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go index 6d7d03eea2..394e33185b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/planner.go @@ -68,7 +68,7 @@ func (p *tsdbBasedPlanner) plan(noCompactMarked map[ulid.ULID]*metadata.NoCompac } // No overlapping blocks, do compaction the usual way. - // We do not include a recently producted block with max(minTime), so the block which was just uploaded to bucket. + // We do not include a recently produced block with max(minTime), so the block which was just uploaded to bucket. // This gives users a window of a full block size maintenance if needed. if _, excluded := noCompactMarked[metasByMinTime[len(metasByMinTime)-1].ULID]; !excluded { notExcludedMetasByMinTime = notExcludedMetasByMinTime[:len(notExcludedMetasByMinTime)-1] @@ -200,7 +200,7 @@ func splitByRange(metasByMinTime []*metadata.Meta, tr int64) [][]*metadata.Meta t0 = tr * ((m.MinTime - tr + 1) / tr) } - // Skip blocks that don't fall into the range. This can happen via mis-alignment or + // Skip blocks that don't fall into the range. This can happen via misalignment or // by being the multiple of the intended range. if m.MaxTime > t0+tr { i++ diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go new file mode 100644 index 0000000000..4e315596df --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/grpc.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
+ +package dns + +import ( + "context" + "sync" + "time" + + grpcresolver "google.golang.org/grpc/resolver" +) + +var ( + _ grpcresolver.Builder = &builder{} + _ grpcresolver.Resolver = &resolver{} +) + +type builder struct { + resolveInterval time.Duration + provider *Provider +} + +func RegisterGRPCResolver(provider *Provider, interval time.Duration) { + grpcresolver.Register(&builder{ + resolveInterval: interval, + provider: provider, + }) +} + +func (b *builder) Scheme() string { return "thanos" } + +func (b *builder) Build(t grpcresolver.Target, cc grpcresolver.ClientConn, _ grpcresolver.BuildOptions) (grpcresolver.Resolver, error) { + ctx, cancel := context.WithCancel(context.Background()) + r := &resolver{ + provider: b.provider, + target: t.Endpoint(), + ctx: ctx, + cancel: cancel, + cc: cc, + interval: b.resolveInterval, + } + r.wg.Add(1) + go r.run() + + return r, nil +} + +type resolver struct { + provider *Provider + + target string + ctx context.Context + cancel context.CancelFunc + cc grpcresolver.ClientConn + interval time.Duration + + wg sync.WaitGroup +} + +func (r *resolver) Close() { + r.cancel() + r.wg.Wait() +} + +func (r *resolver) ResolveNow(_ grpcresolver.ResolveNowOptions) {} + +func (r *resolver) resolve() error { + ctx, cancel := context.WithTimeout(r.ctx, r.interval) + defer cancel() + return r.provider.Resolve(ctx, []string{r.target}) +} + +func (r *resolver) addresses() []string { + return r.provider.AddressesForHost(r.target) +} + +func (r *resolver) run() { + defer r.wg.Done() + for { + if err := r.resolve(); err != nil { + r.cc.ReportError(err) + } else { + state := grpcresolver.State{} + for _, addr := range r.addresses() { + raddr := grpcresolver.Address{Addr: addr} + state.Addresses = append(state.Addresses, raddr) + } + _ = r.cc.UpdateState(state) + } + select { + case <-r.ctx.Done(): + return + case <-time.After(r.interval): + } + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 3ec032a654..8f42bf4d26 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -164,3 +164,16 @@ func (p *Provider) Addresses() []string { } return result } + +// AddressesForHost returns the latest addresses present for the host in the Provider. +func (p *Provider) AddressesForHost(host string) []string { + p.RLock() + defer p.RUnlock() + + addrs := p.resolved[host] + + res := make([]string, len(addrs)) + copy(res, addrs) + + return res +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index 5dde62c5ee..1f96f4c666 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -734,7 +734,7 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string // SeriesInGRPC returns the labels from Prometheus series API. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. 
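Wiring for the new `thanos:///` gRPC resolver scheme introduced above, as a hedged sketch: `RegisterGRPCResolver` and the scheme come from the new dns/grpc.go file, while the provider construction, target host and dial options are illustrative and may differ in the calling code.

```go
package sketch

import (
	"time"

	"github.com/thanos-io/thanos/pkg/discovery/dns"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialStores registers the resolver once and dials through it; the builder
// then re-resolves every interval and pushes fresh addresses into the conn.
func dialStores(provider *dns.Provider) (*grpc.ClientConn, error) {
	dns.RegisterGRPCResolver(provider, 30*time.Second)

	return grpc.Dial(
		"thanos:///store-gateway.monitoring.svc.cluster.local:10901",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
}
```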
-func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]map[string]string, error) { +func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]map[string]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/series") q := u.Query() @@ -742,6 +742,7 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la q.Add("match[]", storepb.PromMatchersToString(matchers...)) q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) + q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -753,7 +754,7 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*la // LabelNamesInGRPC returns all known label names constrained by the given matchers. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { +func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/labels") q := u.Query() @@ -763,6 +764,7 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) + q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { @@ -773,7 +775,7 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers [ // LabelValuesInGRPC returns all known label values for a given label name. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64) ([]string, error) { +func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []*labels.Matcher, startTime, endTime int64, limit int) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/label/", label, "/values") q := u.Query() @@ -783,6 +785,7 @@ func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label str } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) + q.Add("limit", strconv.Itoa(limit)) u.RawQuery = q.Encode() var m struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go index b1faff425b..951db8ecc2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/endpointset.go @@ -717,7 +717,7 @@ func (er *endpointRef) updateMetadata(metadata *endpointMetadata, err error) { } // isQueryable returns true if an endpointRef should be used for querying. -// A strict endpointRef is always queriable. A non-strict endpointRef +// A strict endpointRef is always queryable. A non-strict endpointRef // is queryable if the last health check (info call) succeeded. 
func (er *endpointRef) isQueryable() bool { er.mtx.RLock() @@ -797,11 +797,7 @@ func (er *endpointRef) labelSets() []labels.Labels { labelSet := make([]labels.Labels, 0, len(er.metadata.LabelSets)) for _, ls := range labelpb.ZLabelSetsToPromLabelSets(er.metadata.LabelSets...) { - if len(ls) == 0 { - continue - } - // Compatibility label for Queriers pre 0.8.1. Filter it out now. - if ls[0].Name == store.CompatibilityTypeLabelName { + if ls.Len() == 0 { continue } labelSet = append(labelSet, ls.Copy()) diff --git a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go index 9fddae11a5..e084344ed9 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/query/querier.go +++ b/vendor/github.com/thanos-io/thanos/pkg/query/querier.go @@ -331,6 +331,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . req := storepb.SeriesRequest{ MinTime: hints.Start, MaxTime: hints.End, + Limit: int64(hints.Limit), Matchers: sms, MaxResolutionWindow: q.maxResolutionMillis, Aggregates: aggrs, @@ -373,7 +374,7 @@ func (q *querier) selectFn(ctx context.Context, hints *storage.SelectHints, ms . } // LabelValues returns all potential values for a label name. -func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_values") defer span.Finish() @@ -384,12 +385,18 @@ func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.Label if err != nil { return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } + + if hints == nil { + hints = &storage.LabelHints{} + } + req := &storepb.LabelValuesRequest{ Label: name, PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, + Limit: int64(hints.Limit), } if q.isDedupEnabled() { @@ -411,7 +418,7 @@ func (q *querier) LabelValues(ctx context.Context, name string, _ *storage.Label // LabelNames returns all the unique label names present in the block in sorted order constrained // by the given matchers. 
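With the querier changes above, callers can pass `storage.LabelHints` and the limit is forwarded as `Limit` on the store request. A hedged sketch against the standard storage interface; the querier value and label name are illustrative:

```go
package sketch

import (
	"context"

	"github.com/prometheus/prometheus/storage"
)

// limitedLabels asks the store layer for at most 100 label names and values;
// q is any storage.Querier backed by the Thanos querier shown above.
func limitedLabels(ctx context.Context, q storage.Querier) ([]string, []string, error) {
	names, _, err := q.LabelNames(ctx, &storage.LabelHints{Limit: 100})
	if err != nil {
		return nil, nil, err
	}
	values, _, err := q.LabelValues(ctx, "instance", &storage.LabelHints{Limit: 100})
	if err != nil {
		return nil, nil, err
	}
	return names, values, nil
}
```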
-func (q *querier) LabelNames(ctx context.Context, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := tracing.StartSpan(ctx, "querier_label_names") defer span.Finish() @@ -423,11 +430,16 @@ func (q *querier) LabelNames(ctx context.Context, _ *storage.LabelHints, matcher return nil, nil, errors.Wrap(err, "converting prom matchers to storepb matchers") } + if hints == nil { + hints = &storage.LabelHints{} + } + req := &storepb.LabelNamesRequest{ PartialResponseStrategy: q.partialResponseStrategy, Start: q.mint, End: q.maxt, Matchers: pbMatchers, + Limit: int64(hints.Limit), } if q.isDedupEnabled() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go index 941014a06a..298ce03063 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.pb.go @@ -109,6 +109,9 @@ type RulesRequest struct { Type RulesRequest_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.RulesRequest_Type" json:"type,omitempty"` PartialResponseStrategy storepb.PartialResponseStrategy `protobuf:"varint,2,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` MatcherString []string `protobuf:"bytes,3,rep,name=matcher_string,json=matcherString,proto3" json:"matcher_string,omitempty"` + RuleName []string `protobuf:"bytes,4,rep,name=rule_name,json=ruleName,proto3" json:"rule_name,omitempty"` + RuleGroup []string `protobuf:"bytes,5,rep,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"` + File []string `protobuf:"bytes,6,rep,name=file,proto3" json:"file,omitempty"` } func (m *RulesRequest) Reset() { *m = RulesRequest{} } @@ -554,74 +557,76 @@ func init() { func init() { proto.RegisterFile("rules/rulespb/rpc.proto", fileDescriptor_91b1d28f30eb5efb) } var fileDescriptor_91b1d28f30eb5efb = []byte{ - // 1058 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0xc7, - 0x13, 0xf7, 0x60, 0xcf, 0xd8, 0x53, 0xc6, 0x2c, 0xdb, 0x0b, 0x62, 0x60, 0xff, 0xf2, 0x20, 0x4b, - 0xfc, 0x45, 0xa2, 0xac, 0x1d, 0x81, 0x76, 0xa3, 0x3d, 0x45, 0x98, 0x8f, 0x05, 0x09, 0x91, 0x55, - 0x1b, 0xe5, 0xb0, 0x39, 0x38, 0x8d, 0x69, 0xcc, 0x28, 0xe3, 0x99, 0xd9, 0xee, 0x36, 0x11, 0x6f, - 0xb1, 0xe7, 0xbc, 0x48, 0x5e, 0x81, 0x5b, 0xf6, 0x98, 0x93, 0x93, 0xc0, 0x29, 0x3e, 0xe4, 0x19, - 0xa2, 0xae, 0x9e, 0xb1, 0x0d, 0x81, 0xb0, 0x9b, 0x90, 0xcb, 0x54, 0x77, 0xd5, 0xaf, 0x7a, 0xea, - 0xe3, 0xd7, 0x35, 0x03, 0x0b, 0xa2, 0x1f, 0x72, 0xd9, 0xc0, 0x67, 0x72, 0xd4, 0x10, 0x49, 0xa7, - 0x9e, 0x88, 0x58, 0xc5, 0xc4, 0x51, 0xa7, 0x2c, 0x8a, 0xe5, 0xd2, 0xa2, 0x54, 0xb1, 0xe0, 0x0d, - 0x7c, 0x26, 0x47, 0x0d, 0x75, 0x9e, 0x70, 0x69, 0x20, 0x99, 0x29, 0x64, 0x47, 0x3c, 0xbc, 0x61, - 0x9a, 0xeb, 0xc6, 0xdd, 0x18, 0x97, 0x0d, 0xbd, 0x4a, 0xb5, 0x7e, 0x37, 0x8e, 0xbb, 0x21, 0x6f, - 0xe0, 0xee, 0xa8, 0x7f, 0xd2, 0x50, 0x41, 0x8f, 0x4b, 0xc5, 0x7a, 0x89, 0x01, 0xd4, 0x7e, 0xb7, - 0x60, 0x9a, 0xea, 0x50, 0x28, 0x7f, 0xdb, 0xe7, 0x52, 0x91, 0x67, 0x50, 0xd0, 0xc7, 0x7a, 0xd6, - 0xb2, 0xb5, 0x3a, 0xb3, 0xb6, 0x58, 0x37, 0x41, 0xd5, 0x27, 0x31, 0xf5, 0xc3, 0xf3, 0x84, 0x53, - 0x84, 0x91, 0x6f, 0x60, 0x31, 0x61, 0x42, 
0x05, 0x2c, 0x6c, 0x0b, 0x2e, 0x93, 0x38, 0x92, 0xbc, - 0x2d, 0x95, 0x60, 0x8a, 0x77, 0xcf, 0xbd, 0x29, 0x3c, 0xc3, 0xcf, 0xce, 0x78, 0x6d, 0x80, 0x34, - 0xc5, 0xb5, 0x52, 0x18, 0x5d, 0x48, 0x6e, 0x37, 0x90, 0x15, 0x98, 0xe9, 0x31, 0xd5, 0x39, 0xe5, - 0x42, 0x9f, 0x19, 0x44, 0x5d, 0x2f, 0xbf, 0x9c, 0x5f, 0x75, 0x69, 0x25, 0xd5, 0xb6, 0x50, 0x59, - 0xfb, 0x3f, 0x14, 0x74, 0x44, 0xa4, 0x08, 0xf9, 0x8d, 0xfd, 0xfd, 0xd9, 0x1c, 0x71, 0xc1, 0xde, - 0xd8, 0xdf, 0xa6, 0x87, 0xb3, 0x16, 0x01, 0x70, 0xe8, 0xf6, 0xe6, 0x57, 0x74, 0x6b, 0x76, 0xaa, - 0xf6, 0x2d, 0x54, 0xd2, 0x34, 0xcc, 0x7b, 0xc8, 0x27, 0x60, 0x77, 0x45, 0xdc, 0x4f, 0x30, 0xd9, - 0xf2, 0xda, 0xe3, 0xc9, 0x64, 0x5f, 0x69, 0xc3, 0x6e, 0x8e, 0x1a, 0x04, 0x59, 0x82, 0xe2, 0xf7, - 0x4c, 0x44, 0x3a, 0x06, 0x9d, 0x95, 0xbb, 0x9b, 0xa3, 0x99, 0xa2, 0x59, 0x02, 0x47, 0x70, 0xd9, - 0x0f, 0x55, 0x6d, 0x13, 0x60, 0xe4, 0x2b, 0xc9, 0x73, 0x70, 0xd0, 0x59, 0x7a, 0xd6, 0x72, 0xfe, - 0xd6, 0xf3, 0x9b, 0x30, 0x1c, 0xf8, 0x29, 0x88, 0xa6, 0xb2, 0xf6, 0x47, 0x1e, 0xdc, 0x11, 0x82, - 0xfc, 0x0f, 0x0a, 0x11, 0xeb, 0x99, 0x7e, 0xb8, 0xcd, 0xd2, 0x70, 0xe0, 0xe3, 0x9e, 0xe2, 0x53, - 0x5b, 0x4f, 0x82, 0x90, 0x9b, 0x98, 0x8c, 0x55, 0xef, 0x29, 0x3e, 0xc9, 0x33, 0xb0, 0x91, 0x66, - 0x58, 0xb6, 0xf2, 0xda, 0xf4, 0xe4, 0xfb, 0x9b, 0xee, 0x70, 0xe0, 0x1b, 0x33, 0x35, 0x82, 0xac, - 0x42, 0x29, 0x88, 0x14, 0x17, 0x67, 0x2c, 0xf4, 0x0a, 0xcb, 0xd6, 0xaa, 0xd5, 0x9c, 0x1e, 0x0e, - 0xfc, 0x91, 0x8e, 0x8e, 0x56, 0x84, 0xc2, 0x53, 0x7e, 0xc6, 0xc2, 0x3e, 0x53, 0x41, 0x1c, 0xb5, - 0x8f, 0xfb, 0xc2, 0x2c, 0x24, 0xef, 0xc4, 0xd1, 0xb1, 0xf4, 0x6c, 0x74, 0x26, 0xc3, 0x81, 0x3f, - 0x33, 0x86, 0x1d, 0x06, 0x3d, 0x4e, 0x17, 0xc7, 0xfb, 0xad, 0xd4, 0xab, 0x65, 0x9c, 0x48, 0x1b, - 0x1e, 0x85, 0x4c, 0xaa, 0xf6, 0x18, 0xe1, 0x39, 0xd8, 0x96, 0xa5, 0xba, 0x21, 0x71, 0x3d, 0x23, - 0x71, 0xfd, 0x30, 0x23, 0x71, 0x73, 0xe9, 0x62, 0xe0, 0xe7, 0xf4, 0x7b, 0xb4, 0xeb, 0xf6, 0xc8, - 0xf3, 0xdd, 0x2f, 0xbe, 0x45, 0x6f, 0xe8, 0x88, 0x0f, 0x76, 0x18, 0xf4, 0x02, 0xe5, 0xb9, 0xcb, - 0xd6, 0x6a, 0xde, 0xe4, 0x8f, 0x0a, 0x6a, 0x04, 0x39, 0x83, 0x85, 0x3b, 0x28, 0xea, 0x95, 0x3e, - 0x88, 0xc9, 0xcd, 0xa7, 0xc3, 0x81, 0x7f, 0x17, 0x9b, 0xe9, 0x5d, 0x87, 0xd7, 0x22, 0x28, 0xe8, - 0x8e, 0x90, 0xe7, 0xe0, 0x0a, 0xde, 0x89, 0xc5, 0xb1, 0x66, 0x99, 0xa1, 0xe4, 0xfc, 0xa8, 0x65, - 0x99, 0x41, 0x23, 0x77, 0x73, 0x74, 0x8c, 0x24, 0x2b, 0x60, 0xb3, 0x90, 0x0b, 0x85, 0x24, 0x28, - 0xaf, 0x55, 0x32, 0x97, 0x0d, 0xad, 0xd4, 0x0c, 0x46, 0xeb, 0x04, 0x4b, 0x7f, 0xcc, 0x43, 0x05, - 0x8d, 0x7b, 0x91, 0x54, 0x2c, 0xea, 0x70, 0xf2, 0x12, 0x1c, 0x9c, 0x29, 0xf2, 0xe6, 0x4d, 0x78, - 0xb3, 0xaf, 0xd5, 0x2d, 0xae, 0x9a, 0x33, 0x69, 0xa5, 0x53, 0x20, 0x4d, 0x25, 0xd9, 0x85, 0x32, - 0x8b, 0xa2, 0x58, 0x61, 0x8d, 0x65, 0x1a, 0xc3, 0x2d, 0xfe, 0x4f, 0x52, 0xff, 0x49, 0x34, 0x9d, - 0xdc, 0x90, 0x75, 0xb0, 0xa5, 0x62, 0x8a, 0x7b, 0x79, 0x2c, 0x36, 0xb9, 0x96, 0x47, 0x4b, 0x5b, - 0x4c, 0xcf, 0x10, 0x44, 0x8d, 0x20, 0x2d, 0x70, 0x59, 0x47, 0x05, 0x67, 0xbc, 0xcd, 0x14, 0x92, - 0xf6, 0x1e, 0xbe, 0x0c, 0x07, 0x3e, 0x31, 0x0e, 0x1b, 0xea, 0xb3, 0xb8, 0x17, 0x28, 0xde, 0x4b, - 0xd4, 0x39, 0xf2, 0xa5, 0x94, 0xe9, 0x35, 0x53, 0x34, 0x6d, 0x38, 0x12, 0xd9, 0x35, 0x6f, 0x45, - 0x05, 0x35, 0xe2, 0xef, 0x98, 0xe2, 0xfc, 0x97, 0x4c, 0xf9, 0xc9, 0x06, 0x1b, 0xcb, 0x31, 0x2e, - 0x96, 0xf5, 0x11, 0xc5, 0xca, 0x66, 0xc9, 0xd4, 0xad, 0xb3, 0xc4, 0x07, 0xfb, 0x6d, 0x9f, 0x8b, - 0x73, 0xac, 0x7f, 0x9a, 0x35, 0x2a, 0xa8, 0x11, 0xe4, 0x0b, 0x98, 0xfd, 0xcb, 0x55, 0x9f, 0x98, - 0x13, 0x99, 0x8d, 0x3e, 0x3a, 0xbe, 0x71, 0xb5, 0xc7, 0xf4, 0xb2, 
0xff, 0x25, 0xbd, 0x9c, 0x7f, - 0x4e, 0xaf, 0x97, 0xe0, 0xe0, 0x45, 0x90, 0x5e, 0x11, 0xa7, 0xe1, 0xfc, 0xb5, 0x92, 0x65, 0x57, - 0xc1, 0x4c, 0x64, 0x03, 0xa4, 0xa9, 0x24, 0x35, 0x70, 0x4e, 0x39, 0x0b, 0xd5, 0x29, 0xce, 0x01, - 0xd7, 0x60, 0x8c, 0x86, 0xa6, 0x92, 0xbc, 0x00, 0x30, 0xe3, 0x4b, 0x88, 0x58, 0xe0, 0x88, 0x71, - 0x9b, 0x0b, 0xc3, 0x81, 0xff, 0x04, 0xa7, 0x90, 0x56, 0x8e, 0xe9, 0x46, 0xdd, 0x91, 0xf2, 0xbe, - 0x51, 0x0a, 0x0f, 0x34, 0x4a, 0xcb, 0x0f, 0x3a, 0x4a, 0x77, 0x61, 0xe1, 0x3b, 0xce, 0x93, 0xf6, - 0x49, 0xa0, 0x3f, 0xc0, 0xed, 0x93, 0x58, 0x8c, 0x02, 0x9e, 0xc6, 0x80, 0x1f, 0x0f, 0x07, 0x7e, - 0x45, 0x43, 0x76, 0x10, 0xb1, 0x13, 0x0b, 0x3a, 0x77, 0x6d, 0x9b, 0x86, 0x5a, 0xfb, 0x21, 0x0f, - 0x95, 0x6b, 0xb3, 0xed, 0x9e, 0x0f, 0xde, 0x88, 0xa4, 0x53, 0x77, 0x90, 0x74, 0xcc, 0xb5, 0xfc, - 0xc7, 0x72, 0x6d, 0xdc, 0xe6, 0xc2, 0x07, 0xb6, 0xd9, 0x7e, 0xa8, 0x36, 0x3b, 0x0f, 0xd4, 0xe6, - 0xe2, 0x43, 0xb6, 0xf9, 0xd3, 0x75, 0x80, 0xf1, 0x3c, 0x21, 0xd3, 0x50, 0xda, 0x3b, 0xd8, 0xd8, - 0x3c, 0xdc, 0xfb, 0x7a, 0x7b, 0x36, 0x47, 0xca, 0x50, 0x7c, 0xbd, 0x7d, 0xb0, 0xb5, 0x77, 0xf0, - 0xca, 0xfc, 0x65, 0xed, 0xec, 0x51, 0xbd, 0x9e, 0x5a, 0xfb, 0x12, 0x6c, 0xfc, 0xcb, 0x22, 0x2f, - 0xb2, 0xc5, 0xdc, 0x6d, 0x3f, 0x91, 0x4b, 0xf3, 0x37, 0xb4, 0x66, 0xd4, 0x7d, 0x6e, 0x35, 0x57, - 0x2e, 0x7e, 0xab, 0xe6, 0x2e, 0x2e, 0xab, 0xd6, 0xfb, 0xcb, 0xaa, 0xf5, 0xeb, 0x65, 0xd5, 0x7a, - 0x77, 0x55, 0xcd, 0xbd, 0xbf, 0xaa, 0xe6, 0x7e, 0xbe, 0xaa, 0xe6, 0xde, 0x14, 0xd3, 0x1f, 0xe7, - 0x23, 0x07, 0x93, 0x5b, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xed, 0x6d, 0x27, 0x50, 0x0b, - 0x00, 0x00, + // 1096 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0x23, 0x47, + 0x10, 0xf6, 0xd8, 0x9e, 0xb1, 0xa7, 0x8c, 0x59, 0xb6, 0x17, 0xc4, 0x00, 0x89, 0x07, 0x59, 0x22, + 0x22, 0x51, 0xd6, 0x8e, 0x40, 0xbb, 0xd1, 0x9e, 0x22, 0xcc, 0xcf, 0x82, 0x84, 0xc8, 0xaa, 0x8d, + 0x72, 0xd8, 0x1c, 0x9c, 0xc6, 0x34, 0x66, 0x94, 0xf1, 0xcc, 0x6c, 0x4f, 0x9b, 0x88, 0xb7, 0xd8, + 0x73, 0x5e, 0x24, 0xca, 0x1b, 0x70, 0xcb, 0x1e, 0x73, 0x72, 0x12, 0xb8, 0xf9, 0x90, 0x67, 0x88, + 0xba, 0x7a, 0xc6, 0x63, 0x08, 0x84, 0xdd, 0x84, 0x5c, 0xdc, 0xdd, 0x5f, 0x7d, 0xd5, 0x3f, 0x55, + 0x5f, 0x95, 0x07, 0xe6, 0xc5, 0xc0, 0xe7, 0x71, 0x13, 0x7f, 0xa3, 0xa3, 0xa6, 0x88, 0xba, 0x8d, + 0x48, 0x84, 0x32, 0x24, 0x96, 0x3c, 0x65, 0x41, 0x18, 0x2f, 0x2e, 0xc4, 0x32, 0x14, 0xbc, 0x89, + 0xbf, 0xd1, 0x51, 0x53, 0x9e, 0x47, 0x3c, 0xd6, 0x94, 0xd4, 0xe4, 0xb3, 0x23, 0xee, 0xdf, 0x30, + 0xcd, 0xf6, 0xc2, 0x5e, 0x88, 0xd3, 0xa6, 0x9a, 0x25, 0xa8, 0xdb, 0x0b, 0xc3, 0x9e, 0xcf, 0x9b, + 0xb8, 0x3a, 0x1a, 0x9c, 0x34, 0xa5, 0xd7, 0xe7, 0xb1, 0x64, 0xfd, 0x48, 0x13, 0xea, 0x3f, 0xe7, + 0x61, 0x8a, 0xaa, 0xab, 0x50, 0xfe, 0x66, 0xc0, 0x63, 0x49, 0x9e, 0x42, 0x51, 0x6d, 0xeb, 0x18, + 0xcb, 0xc6, 0xea, 0xf4, 0xda, 0x42, 0x43, 0x5f, 0xaa, 0x31, 0xc9, 0x69, 0x1c, 0x9e, 0x47, 0x9c, + 0x22, 0x8d, 0x7c, 0x0b, 0x0b, 0x11, 0x13, 0xd2, 0x63, 0x7e, 0x47, 0xf0, 0x38, 0x0a, 0x83, 0x98, + 0x77, 0x62, 0x29, 0x98, 0xe4, 0xbd, 0x73, 0x27, 0x8f, 0x7b, 0xb8, 0xe9, 0x1e, 0xaf, 0x34, 0x91, + 0x26, 0xbc, 0x76, 0x42, 0xa3, 0xf3, 0xd1, 0xed, 0x06, 0xb2, 0x02, 0xd3, 0x7d, 0x26, 0xbb, 0xa7, + 0x5c, 0xa8, 0x3d, 0xbd, 0xa0, 0xe7, 0x14, 0x96, 0x0b, 0xab, 0x36, 0xad, 0x26, 0x68, 0x1b, 0x41, + 0xb2, 0x04, 0xb6, 0x8a, 0x66, 0x27, 0x60, 0x7d, 0xee, 0x14, 0x91, 0x51, 0x56, 0xc0, 0x01, 0xeb, + 0x73, 0xf2, 0x31, 0x00, 0x1a, 0x7b, 0x22, 0x1c, 0x44, 0x8e, 0x89, 0x56, 0xa4, 0xbf, 0x54, 0x00, + 0x21, 0x50, 0x3c, 0xf1, 0x7c, 
0xee, 0x58, 0x68, 0xc0, 0x79, 0xfd, 0x13, 0x28, 0xaa, 0x17, 0x92, + 0x12, 0x14, 0x36, 0xf6, 0xf7, 0x67, 0x72, 0xc4, 0x06, 0x73, 0x63, 0x7f, 0x9b, 0x1e, 0xce, 0x18, + 0x04, 0xc0, 0xa2, 0xdb, 0x9b, 0x5f, 0xd3, 0xad, 0x99, 0x7c, 0xfd, 0x3b, 0xa8, 0x26, 0x61, 0xd1, + 0xf7, 0x26, 0x9f, 0x82, 0xa9, 0x8f, 0x51, 0xc1, 0xab, 0xac, 0x3d, 0x9e, 0x0c, 0x1e, 0x1e, 0xb7, + 0x9b, 0xa3, 0x9a, 0x41, 0x16, 0xa1, 0xf4, 0x03, 0x13, 0x81, 0x7a, 0x93, 0x8a, 0x92, 0xbd, 0x9b, + 0xa3, 0x29, 0xd0, 0x2a, 0x83, 0x25, 0x78, 0x3c, 0xf0, 0x65, 0x7d, 0x13, 0x60, 0xec, 0x1b, 0x93, + 0x67, 0x60, 0xa1, 0x73, 0xec, 0x18, 0xcb, 0x85, 0x5b, 0xf7, 0x6f, 0xc1, 0x68, 0xe8, 0x26, 0x24, + 0x9a, 0x8c, 0xf5, 0x3f, 0x0b, 0x60, 0x8f, 0x19, 0xe4, 0x23, 0x28, 0x62, 0x9c, 0xd4, 0x15, 0xed, + 0x56, 0x79, 0x34, 0x74, 0x71, 0x4d, 0xf1, 0x57, 0x59, 0x31, 0x1c, 0xf9, 0xcc, 0xaa, 0xd6, 0x3a, + 0x30, 0xe4, 0x29, 0x98, 0x28, 0x5b, 0x4c, 0x43, 0x65, 0x6d, 0x6a, 0xf2, 0xfc, 0x96, 0x3d, 0x1a, + 0xba, 0xda, 0x4c, 0xf5, 0x40, 0x56, 0xa1, 0xec, 0x05, 0x92, 0x8b, 0x33, 0xe6, 0x3b, 0xc5, 0x65, + 0x63, 0xd5, 0x68, 0x4d, 0x8d, 0x86, 0xee, 0x18, 0xa3, 0xe3, 0x19, 0xa1, 0xb0, 0xc4, 0xcf, 0x98, + 0x3f, 0x60, 0xd2, 0x0b, 0x83, 0xce, 0xf1, 0x40, 0xe8, 0x49, 0xcc, 0xbb, 0x61, 0x70, 0x1c, 0x3b, + 0x26, 0x3a, 0x93, 0xd1, 0xd0, 0x9d, 0xce, 0x68, 0x87, 0x5e, 0x9f, 0xd3, 0x85, 0x6c, 0xbd, 0x95, + 0x78, 0xb5, 0xb5, 0x13, 0xe9, 0xc0, 0x23, 0x9f, 0xc5, 0xb2, 0x93, 0x31, 0x1c, 0x0b, 0xd3, 0xb2, + 0xd8, 0xd0, 0x45, 0xd1, 0x48, 0x8b, 0xa2, 0x71, 0x98, 0x16, 0x45, 0x6b, 0xf1, 0x62, 0xe8, 0xe6, + 0xd4, 0x39, 0xca, 0x75, 0x7b, 0xec, 0xf9, 0xf6, 0x37, 0xd7, 0xa0, 0x37, 0x30, 0xe2, 0x82, 0xe9, + 0x7b, 0x7d, 0x4f, 0x3a, 0xf6, 0xb2, 0xb1, 0x5a, 0xd0, 0xef, 0x47, 0x80, 0xea, 0x81, 0x9c, 0xc1, + 0xfc, 0x1d, 0x92, 0x77, 0xca, 0xef, 0x55, 0x19, 0xad, 0xa5, 0xd1, 0xd0, 0xbd, 0xab, 0x3a, 0xe8, + 0x5d, 0x9b, 0xd7, 0x03, 0x28, 0xaa, 0x8c, 0x90, 0x67, 0x60, 0x0b, 0xde, 0x0d, 0xc5, 0xb1, 0x52, + 0x99, 0x96, 0xe4, 0xdc, 0x38, 0x65, 0xa9, 0x41, 0x31, 0x77, 0x73, 0x34, 0x63, 0x92, 0x15, 0x30, + 0x99, 0xcf, 0x85, 0x44, 0x11, 0x54, 0xd6, 0xaa, 0xa9, 0xcb, 0x86, 0x02, 0x95, 0x82, 0xd1, 0x3a, + 0xa1, 0xd2, 0x9f, 0x0a, 0x50, 0x45, 0xe3, 0x5e, 0x10, 0x4b, 0x16, 0x74, 0x39, 0x79, 0x01, 0x16, + 0xf6, 0xa8, 0xf8, 0x66, 0x25, 0xbc, 0xde, 0x57, 0x70, 0x9b, 0xcb, 0xd6, 0x74, 0x12, 0xe9, 0x84, + 0x48, 0x93, 0x91, 0xec, 0x42, 0x85, 0x05, 0x41, 0x28, 0x31, 0xc6, 0x71, 0x72, 0x87, 0x5b, 0xfc, + 0x9f, 0x24, 0xfe, 0x93, 0x6c, 0x3a, 0xb9, 0x20, 0xeb, 0x60, 0xc6, 0x92, 0x49, 0xee, 0x14, 0x30, + 0xd8, 0xe4, 0xda, 0x3b, 0xda, 0xca, 0xa2, 0x73, 0x86, 0x24, 0xaa, 0x07, 0xd2, 0x06, 0x9b, 0x75, + 0xa5, 0x77, 0xc6, 0x3b, 0x4c, 0xa2, 0x68, 0xef, 0xd1, 0xcb, 0x68, 0xe8, 0x12, 0xed, 0xb0, 0x21, + 0x3f, 0x0f, 0xfb, 0x9e, 0xe4, 0xfd, 0x48, 0x9e, 0xa3, 0x5e, 0xca, 0x29, 0xae, 0x94, 0xa2, 0x64, + 0xc3, 0x51, 0xc8, 0xb6, 0x3e, 0x15, 0x01, 0xaa, 0x87, 0x7f, 0x52, 0x8a, 0xf5, 0x7f, 0x2a, 0xe5, + 0x17, 0x13, 0x4c, 0x0c, 0x47, 0x16, 0x2c, 0xe3, 0x03, 0x82, 0x95, 0xf6, 0x92, 0xfc, 0xad, 0xbd, + 0xc4, 0x05, 0xf3, 0xcd, 0x80, 0x8b, 0x73, 0x8c, 0x7f, 0xf2, 0x6a, 0x04, 0xa8, 0x1e, 0xc8, 0x97, + 0x30, 0xf3, 0xb7, 0x52, 0x9f, 0xe8, 0x13, 0xa9, 0x8d, 0x3e, 0x3a, 0xbe, 0x51, 0xda, 0x99, 0xbc, + 0xcc, 0xff, 0x28, 0x2f, 0xeb, 0xdf, 0xcb, 0xeb, 0x05, 0x58, 0x58, 0x08, 0xb1, 0x53, 0xc2, 0x6e, + 0x38, 0x77, 0x2d, 0x64, 0x69, 0x29, 0xe8, 0x8e, 0xac, 0x89, 0x34, 0x19, 0x49, 0x1d, 0xac, 0x53, + 0xce, 0x7c, 0x79, 0x8a, 0x7d, 0xc0, 0xd6, 0x1c, 0x8d, 0xd0, 0x64, 0x24, 0xcf, 0x01, 0x74, 0xfb, + 0x12, 0x22, 0x14, 0xd8, 0x62, 0xec, 0xd6, 0xfc, 0x68, 
0xe8, 0x3e, 0xc1, 0x2e, 0xa4, 0xc0, 0x4c, + 0x6e, 0xd4, 0x1e, 0x83, 0xf7, 0xb5, 0x52, 0x78, 0xa0, 0x56, 0x5a, 0x79, 0xd0, 0x56, 0xba, 0x0b, + 0xf3, 0xdf, 0x73, 0x1e, 0x75, 0x4e, 0x3c, 0xf5, 0x87, 0xde, 0x39, 0x09, 0xc5, 0xf8, 0xc2, 0x53, + 0x78, 0xe1, 0xc7, 0xa3, 0xa1, 0x5b, 0x55, 0x94, 0x1d, 0x64, 0xec, 0x84, 0x82, 0xce, 0x5e, 0x5b, + 0x26, 0x57, 0xad, 0xff, 0x58, 0x80, 0xea, 0xb5, 0xde, 0x76, 0xcf, 0x1f, 0xde, 0x58, 0xa4, 0xf9, + 0x3b, 0x44, 0x9a, 0x69, 0xad, 0xf0, 0xa1, 0x5a, 0xcb, 0xd2, 0x5c, 0x7c, 0xcf, 0x34, 0x9b, 0x0f, + 0x95, 0x66, 0xeb, 0x81, 0xd2, 0x5c, 0x7a, 0xc8, 0x34, 0x7f, 0xb6, 0x0e, 0x90, 0xf5, 0x13, 0x32, + 0x05, 0xe5, 0xbd, 0x83, 0x8d, 0xcd, 0xc3, 0xbd, 0x6f, 0xb6, 0x67, 0x72, 0xa4, 0x02, 0xa5, 0x57, + 0xdb, 0x07, 0x5b, 0x7b, 0x07, 0x2f, 0xf5, 0x57, 0xd6, 0xce, 0x1e, 0x55, 0xf3, 0xfc, 0xda, 0x57, + 0x60, 0xe2, 0x57, 0x16, 0x79, 0x9e, 0x4e, 0x66, 0x6f, 0xfb, 0x28, 0x5d, 0x9c, 0xbb, 0x81, 0xea, + 0x56, 0xf7, 0x85, 0xd1, 0x5a, 0xb9, 0xf8, 0xa3, 0x96, 0xbb, 0xb8, 0xac, 0x19, 0xef, 0x2e, 0x6b, + 0xc6, 0xef, 0x97, 0x35, 0xe3, 0xed, 0x55, 0x2d, 0xf7, 0xee, 0xaa, 0x96, 0xfb, 0xf5, 0xaa, 0x96, + 0x7b, 0x5d, 0x4a, 0x3e, 0xc4, 0x8f, 0x2c, 0x7c, 0xdc, 0xfa, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xa0, 0xc1, 0xf7, 0x10, 0xa0, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -755,6 +760,33 @@ func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.File) > 0 { + for iNdEx := len(m.File) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.File[iNdEx]) + copy(dAtA[i:], m.File[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.File[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RuleGroup) > 0 { + for iNdEx := len(m.RuleGroup) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RuleGroup[iNdEx]) + copy(dAtA[i:], m.RuleGroup[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleGroup[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.RuleName) > 0 { + for iNdEx := len(m.RuleName) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RuleName[iNdEx]) + copy(dAtA[i:], m.RuleName[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.RuleName[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if len(m.MatcherString) > 0 { for iNdEx := len(m.MatcherString) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MatcherString[iNdEx]) @@ -1326,6 +1358,24 @@ func (m *RulesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if len(m.RuleName) > 0 { + for _, s := range m.RuleName { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.RuleGroup) > 0 { + for _, s := range m.RuleGroup { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.File) > 0 { + for _, s := range m.File { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -1664,6 +1714,102 @@ func (m *RulesRequest) Unmarshal(dAtA []byte) error { } m.MatcherString = append(m.MatcherString, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuleName = 
append(m.RuleName, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuleGroup = append(m.RuleGroup, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.File = append(m.File, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto index 25d809ede9..f5fc8a038b 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/rpc.proto @@ -41,6 +41,9 @@ message RulesRequest { Type type = 1; PartialResponseStrategy partial_response_strategy = 2; repeated string matcher_string = 3; + repeated string rule_name = 4; + repeated string rule_group = 5; + repeated string file = 6; } message RulesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go index 809dfce36b..9aaeeca615 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go @@ -3,7 +3,7 @@ // Package runutil provides helpers to advanced function scheduling control like repeat or retry. // -// It's very often the case when you need to excutes some code every fixed intervals or have it retried automatically. +// It's very often the case when you need to executes some code every fixed intervals or have it retried automatically. // To make it reliably with proper timeout, you need to carefully arrange some boilerplate for this. // Below function does it for you. // diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 75a85dd9fb..32a4323b62 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -117,6 +117,9 @@ const ( // SeriesBatchSize is the default batch size when fetching series from object storage. SeriesBatchSize = 10000 + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
+ checkContextEveryNIterations = 128 ) var ( @@ -1005,6 +1008,7 @@ type blockSeriesClient struct { mint int64 maxt int64 + seriesLimit int indexr *bucketIndexReader chunkr *bucketChunkReader loadAggregates []storepb.Aggr @@ -1080,6 +1084,7 @@ func newBlockSeriesClient( mint: req.MinTime, maxt: req.MaxTime, + seriesLimit: int(req.Limit), indexr: b.indexReader(logger), chunkr: chunkr, seriesLimiter: seriesLimiter, @@ -1159,14 +1164,20 @@ func (b *blockSeriesClient) ExpandPostings( b.expandedPostings = make([]storage.SeriesRef, 0, len(b.lazyPostings.postings)/2) b.lazyExpandedPostingsCount.Inc() } else { + // If seriesLimit is set, it can be applied here to limit the amount of series. + // Note: This can only be done when postings are not expanded lazily. + if b.seriesLimit > 0 && len(b.lazyPostings.postings) > b.seriesLimit { + b.lazyPostings.postings = b.lazyPostings.postings[:b.seriesLimit] + } + // Apply series limiter eargerly if lazy postings not enabled. - if err := seriesLimiter.Reserve(uint64(len(ps.postings))); err != nil { + if err := seriesLimiter.Reserve(uint64(len(b.lazyPostings.postings))); err != nil { return httpgrpc.Errorf(int(codes.ResourceExhausted), "exceeded series limit: %s", err) } } - if b.batchSize > len(ps.postings) { - b.batchSize = len(ps.postings) + if b.batchSize > len(b.lazyPostings.postings) { + b.batchSize = len(b.lazyPostings.postings) } b.entries = make([]seriesEntry, 0, b.batchSize) @@ -1288,6 +1299,11 @@ OUTER: } seriesMatched++ + if b.seriesLimit > 0 && seriesMatched > b.seriesLimit { + // Exit early if seriesLimit is set. + b.hasMorePostings = false + break + } s := seriesEntry{lset: completeLabelset} if b.skipChunks { b.entries = append(b.entries, s) @@ -1691,7 +1707,12 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, seriesSrv storepb.Store tracing.DoInSpan(ctx, "bucket_store_merge_all", func(ctx context.Context) { begin := time.Now() set := NewResponseDeduplicator(NewProxyResponseLoserTree(respSets...)) + i := 0 for set.Next() { + i++ + if req.Limit > 0 && i > int(req.Limit) { + break + } at := set.At() warn := at.GetWarning() if warn != "" { @@ -1942,8 +1963,13 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label names response hints").Error()) } + names := strutil.MergeSlices(sets...) + if req.Limit > 0 && len(names) > int(req.Limit) { + names = names[:req.Limit] + } + return &storepb.LabelNamesResponse{ - Names: strutil.MergeSlices(sets...), + Names: names, Hints: anyHints, }, nil } @@ -1957,7 +1983,7 @@ func (b *bucketBlock) FilterExtLabelsMatchers(matchers []*labels.Matcher) ([]*la // If value is empty string the matcher is a valid one since it's not part of external labels. if v == "" { result = append(result, m) - } else if v != "" && v != m.Value { + } else if v != "" && !m.Matches(v) { // If matcher is external label but value is different we don't want to look in block anyway. return []*labels.Matcher{}, false } @@ -2157,8 +2183,13 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return nil, status.Error(codes.Unknown, errors.Wrap(err, "marshal label values response hints").Error()) } + vals := strutil.MergeSlices(sets...) 
+ if req.Limit > 0 && len(vals) > int(req.Limit) { + vals = vals[:req.Limit] + } + return &storepb.LabelValuesResponse{ - Values: strutil.MergeSlices(sets...), + Values: vals, Hints: anyHints, }, nil } @@ -2605,10 +2636,15 @@ func (r *bucketIndexReader) ExpandedPostings(ctx context.Context, ms sortedMatch } // ExpandPostingsWithContext returns the postings expanded as a slice and considers context. -func ExpandPostingsWithContext(ctx context.Context, p index.Postings) (res []storage.SeriesRef, err error) { +func ExpandPostingsWithContext(ctx context.Context, p index.Postings) ([]storage.SeriesRef, error) { + res := make([]storage.SeriesRef, 0, 1024) // Pre-allocate slice with initial capacity + i := 0 for p.Next() { - if ctx.Err() != nil { - return nil, ctx.Err() + i++ + if i%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return nil, err + } } res = append(res, p.At()) } @@ -2831,8 +2867,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er return nil, nil, err } - for _, val := range vals { - if ctx.Err() != nil { + for i, val := range vals { + if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, nil, ctx.Err() } if !m.Matches(val) { @@ -2860,8 +2896,8 @@ func toPostingGroup(ctx context.Context, lvalsFn func(name string) ([]string, er } var toAdd []string - for _, val := range vals { - if ctx.Err() != nil { + for i, val := range vals { + if (i+1)%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, nil, ctx.Err() } if m.Matches(val) { @@ -2964,8 +3000,10 @@ func (r *bucketIndexReader) fetchPostings(ctx context.Context, keys []labels.Lab // If we have a miss, mark key to be fetched in `ptrs` slice. // Overlaps are well handled by partitioner, so we don't need to deduplicate keys. for ix, key := range keys { - if err := ctx.Err(); err != nil { - return nil, closeFns, err + if (ix+1)%checkContextEveryNIterations == 0 { + if err := ctx.Err(); err != nil { + return nil, closeFns, err + } } // Get postings for the given key from cache first. if b, ok := fromCache[key]; ok { @@ -3567,10 +3605,10 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a bufPooled, err := r.block.chunkPool.Get(r.block.estimatedMaxChunkSize) if err == nil { buf = *bufPooled + defer r.block.chunkPool.Put(&buf) } else { buf = make([]byte, r.block.estimatedMaxChunkSize) } - defer r.block.chunkPool.Put(&buf) for i, pIdx := range pIdxs { // Fast forward range reader to the next chunk start in case of sparse (for our purposes) byte range. diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go index 42e6de55a7..3a8ddbb86d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/inmemory.go @@ -30,7 +30,12 @@ var ( } ) -const maxInt = int(^uint(0) >> 1) +const ( + maxInt = int(^uint(0) >> 1) + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. 
+ checkContextEveryNIterations = 128 +) type InMemoryIndexCache struct { mtx sync.Mutex @@ -302,11 +307,13 @@ func (c *InMemoryIndexCache) FetchMultiPostings(ctx context.Context, blockID uli blockIDKey := blockID.String() requests := 0 hit := 0 - for _, key := range keys { - if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit)) - return hits, misses + for i, key := range keys { + if (i+1)%checkContextEveryNIterations == 0 { + if ctx.Err() != nil { + c.commonMetrics.RequestTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(CacheTypePostings, tenant).Add(float64(hit)) + return hits, misses + } } requests++ if b, ok := c.get(CacheKey{blockIDKey, CacheKeyPostings(key), ""}); ok { @@ -363,11 +370,13 @@ func (c *InMemoryIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid. blockIDKey := blockID.String() requests := 0 hit := 0 - for _, id := range ids { - if ctx.Err() != nil { - c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests)) - c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit)) - return hits, misses + for i, id := range ids { + if (i+1)%checkContextEveryNIterations == 0 { + if ctx.Err() != nil { + c.commonMetrics.RequestTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(requests)) + c.commonMetrics.HitsTotal.WithLabelValues(CacheTypeSeries, tenant).Add(float64(hit)) + return hits, misses + } } requests++ if b, ok := c.get(CacheKey{blockIDKey, CacheKeySeries(id), ""}); ok { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go index a72ce0d664..38a0f61822 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/tracing_index_cache.go @@ -66,8 +66,8 @@ func (c *TracingIndexCache) FetchExpandedPostings(ctx context.Context, blockID u return data, exists } -// StoreSeries stores a single series. Skip intrumenting this method -// excessive spans as a single request can store millions of serieses. +// StoreSeries stores a single series. Skip instrumenting this method +// excessive spans as a single request can store millions of series. func (c *TracingIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { c.cache.StoreSeries(blockID, id, v, tenant) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/io.go b/vendor/github.com/thanos-io/thanos/pkg/store/io.go index 657f3134d2..f2356e6759 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/io.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/io.go @@ -109,7 +109,7 @@ func readByteRanges(src io.Reader, dst []byte, byteRanges byteRanges) ([]byte, e if err != nil { // We get an ErrUnexpectedEOF if EOF is reached before we fill the slice. // Due to how the reading logic works in the bucket store, we may try to overread - // the last byte range so, if the error occurrs on the last one, we consider it legit. + // the last byte range so, if the error occurs on the last one, we consider it legit. 
if err == io.ErrUnexpectedEOF && idx == len(byteRanges)-1 { return dst, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go index 1858b7dee4..f8363ab477 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/lazy_postings.go @@ -308,8 +308,8 @@ func fetchAndExpandPostingGroups(ctx context.Context, r *bucketIndexReader, post result := index.Without(index.Intersect(groupAdds...), index.Merge(ctx, groupRemovals...)) - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err := ctx.Err(); err != nil { + return nil, nil, err } ps, err := ExpandPostingsWithContext(ctx, result) if err != nil { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 721e9ed51e..2946278978 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -173,7 +173,7 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Sto if r.SkipChunks { finalExtLset := rmLabels(extLset.Copy(), extLsetToRemove) - labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) + labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime, int(r.Limit)) if err != nil { return err } @@ -571,12 +571,12 @@ func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesR var lbls []string if len(matchers) == 0 || p.labelCallsSupportMatchers() { - lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End) + lbls, err = p.client.LabelNamesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } } else { - sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) + sers, err := p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } @@ -642,7 +642,7 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue if len(matchers) == 0 { return &storepb.LabelValuesResponse{Values: []string{val}}, nil } - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } @@ -653,12 +653,12 @@ func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValue } if len(matchers) == 0 || p.labelCallsSupportMatchers() { - vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End) + vals, err = p.client.LabelValuesInGRPC(ctx, p.base, r.Label, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } } else { - sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End) + sers, err = p.client.SeriesInGRPC(ctx, p.base, matchers, r.Start, r.End, int(r.Limit)) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 0ac1fc659c..c0c1bacc68 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -286,7 +286,7 @@ func (s *ProxyStore) TSDBInfos() []infopb.TSDBInfo { func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { // TODO(bwplotka): This should be part of 
request logger, otherwise it does not make much sense. Also, could be - // tiggered by tracing span to reduce cognitive load. + // triggered by tracing span to reduce cognitive load. reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -327,6 +327,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. r := &storepb.SeriesRequest{ MinTime: originalRequest.MinTime, MaxTime: originalRequest.MaxTime, + Limit: originalRequest.Limit, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), Aggregates: originalRequest.Aggregates, MaxResolutionWindow: originalRequest.MaxResolutionWindow, @@ -363,7 +364,13 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. level.Debug(reqLogger).Log("msg", "Series: started fanout streams", "status", strings.Join(storeDebugMsgs, ";")) respHeap := NewResponseDeduplicator(NewProxyResponseLoserTree(storeResponses...)) + + i := 0 for respHeap.Next() { + i++ + if r.Limit > 0 && i > int(r.Limit) { + break + } resp := respHeap.At() if resp.GetWarning() != "" && (r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT) { @@ -381,7 +388,7 @@ func (s *ProxyStore) Series(originalRequest *storepb.SeriesRequest, srv storepb. // LabelNames returns all known label names. func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // tiggered by tracing span to reduce cognitive load. + // triggered by tracing span to reduce cognitive load. reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -419,6 +426,7 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, + Hints: originalRequest.Hints, } var ( @@ -465,8 +473,13 @@ func (s *ProxyStore) LabelNames(ctx context.Context, originalRequest *storepb.La return nil, err } + result := strutil.MergeUnsortedSlices(names...) + if originalRequest.Limit > 0 && len(result) > int(originalRequest.Limit) { + result = result[:originalRequest.Limit] + } + return &storepb.LabelNamesResponse{ - Names: strutil.MergeUnsortedSlices(names...), + Names: result, Warnings: warnings, }, nil } @@ -476,7 +489,7 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L *storepb.LabelValuesResponse, error, ) { // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be - // tiggered by tracing span to reduce cognitive load. + // triggered by tracing span to reduce cognitive load. 
reqLogger := log.With(s.logger, "component", "proxy") if s.debugLogging { reqLogger = log.With(reqLogger, "request", originalRequest.String()) @@ -520,6 +533,7 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L End: originalRequest.End, Matchers: append(storeMatchers, MatchersForLabelSets(storeLabelSets)...), WithoutReplicaLabels: originalRequest.WithoutReplicaLabels, + Limit: originalRequest.Limit, } var ( @@ -567,8 +581,13 @@ func (s *ProxyStore) LabelValues(ctx context.Context, originalRequest *storepb.L return nil, err } + vals := strutil.MergeUnsortedSlices(all...) + if originalRequest.Limit > 0 && len(vals) > int(originalRequest.Limit) { + vals = vals[:originalRequest.Limit] + } + return &storepb.LabelValuesResponse{ - Values: strutil.MergeUnsortedSlices(all...), + Values: vals, Warnings: warnings, }, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go index fa002cc9b1..e2764d574a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy_merge.go @@ -361,7 +361,9 @@ func newLazyRespSet( var rerr error // If timer is already stopped if t != nil && !t.Stop() { - <-t.C // Drain the channel if it was already stopped. + if t.C != nil { + <-t.C // Drain the channel if it was already stopped. + } rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, st) } else { rerr = errors.Wrapf(err, "receive series from %s", st) @@ -614,7 +616,9 @@ func newEagerRespSet( var rerr error // If timer is already stopped if t != nil && !t.Stop() { - <-t.C // Drain the channel if it was already stopped. + if t.C != nil { + <-t.C // Drain the channel if it was already stopped. + } rerr = errors.Wrapf(err, "failed to receive any data in %s from %s", l.frameTimeout, storeName) } else { rerr = errors.Wrapf(err, "receive series from %s", storeName) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go index 050b8e912f..0da00daf4d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/samples.go @@ -61,7 +61,7 @@ func SamplesFromPromqlSeries(series promql.Series) ([]Sample, []Histogram) { // HistogramProtoToHistogram extracts a (normal integer) Histogram from the // provided proto message. The caller has to make sure that the proto message -// represents an interger histogram and not a float histogram. +// represents an integer histogram and not a float histogram. // Copied from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645 func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram { if hp.IsFloatHistogram() { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go index b5e85d69d8..3ddb507327 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go @@ -305,6 +305,8 @@ type SeriesRequest struct { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. 
WithoutReplicaLabels []string `protobuf:"bytes,14,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` + // limit is used to limit the number of results returned + Limit int64 `protobuf:"varint,15,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } @@ -659,6 +661,8 @@ type LabelNamesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,6,rep,name=matchers,proto3" json:"matchers"` // same as in series request. WithoutReplicaLabels []string `protobuf:"bytes,7,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` + // limit is used to limit the number of results returned + Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } @@ -750,6 +754,8 @@ type LabelValuesRequest struct { Matchers []LabelMatcher `protobuf:"bytes,7,rep,name=matchers,proto3" json:"matchers"` // same as in series request. WithoutReplicaLabels []string `protobuf:"bytes,8,rep,name=without_replica_labels,json=withoutReplicaLabels,proto3" json:"without_replica_labels,omitempty"` + // limit is used to limit the number of results returned + Limit int64 `protobuf:"varint,9,opt,name=limit,proto3" json:"limit,omitempty"` } func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } @@ -850,91 +856,92 @@ func init() { func init() { proto.RegisterFile("store/storepb/rpc.proto", fileDescriptor_a938d55a388af629) } var fileDescriptor_a938d55a388af629 = []byte{ - // 1331 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0x13, 0x47, - 0x14, 0xf7, 0x7a, 0xbd, 0xfe, 0xf3, 0x9c, 0xb8, 0x66, 0x30, 0x61, 0x63, 0x24, 0xc7, 0x75, 0x55, - 0xc9, 0x42, 0xd4, 0xa6, 0x06, 0x21, 0xb5, 0xe2, 0x92, 0x04, 0x43, 0xa2, 0x12, 0x53, 0xc6, 0x09, - 0x69, 0xa9, 0x2a, 0x6b, 0x6d, 0x4f, 0xd6, 0x2b, 0xec, 0xdd, 0x65, 0x67, 0xb6, 0x89, 0xaf, 0xad, - 0x7a, 0xab, 0xaa, 0xaa, 0x1f, 0xa1, 0x9f, 0x86, 0x23, 0xc7, 0xaa, 0x07, 0xd4, 0xc2, 0xbd, 0x9f, - 0xa1, 0x9a, 0x3f, 0xbb, 0xf6, 0xa6, 0x21, 0x08, 0x91, 0x4b, 0x34, 0xef, 0xf7, 0x7b, 0xf3, 0xe6, - 0xfd, 0xcf, 0x1a, 0xae, 0x52, 0xe6, 0x05, 0xa4, 0x2d, 0xfe, 0xfa, 0xc3, 0x76, 0xe0, 0x8f, 0x5a, - 0x7e, 0xe0, 0x31, 0x0f, 0x65, 0xd9, 0xc4, 0x72, 0x3d, 0x5a, 0x5d, 0x4f, 0x2a, 0xb0, 0xb9, 0x4f, - 0xa8, 0x54, 0xa9, 0x56, 0x6c, 0xcf, 0xf6, 0xc4, 0xb1, 0xcd, 0x4f, 0x0a, 0xad, 0x27, 0x2f, 0xf8, - 0x81, 0x37, 0x3b, 0x75, 0x4f, 0x99, 0x9c, 0x5a, 0x43, 0x32, 0x3d, 0x4d, 0xd9, 0x9e, 0x67, 0x4f, - 0x49, 0x5b, 0x48, 0xc3, 0xf0, 0xa8, 0x6d, 0xb9, 0x73, 0x49, 0x35, 0x3e, 0x82, 0xd5, 0xc3, 0xc0, - 0x61, 0x04, 0x13, 0xea, 0x7b, 0x2e, 0x25, 0x8d, 0x9f, 0x34, 0x58, 0x51, 0xc8, 0xf3, 0x90, 0x50, - 0x86, 0x36, 0x01, 0x98, 0x33, 0x23, 0x94, 0x04, 0x0e, 0xa1, 0xa6, 0x56, 0xd7, 0x9b, 0xc5, 0xce, - 0x35, 0x7e, 0x7b, 0x46, 0xd8, 0x84, 0x84, 0x74, 0x30, 0xf2, 0xfc, 0x79, 0x6b, 0xdf, 0x99, 0x91, - 0xbe, 0x50, 0xd9, 0xca, 0xbc, 0x78, 0xb5, 0x91, 0xc2, 0x4b, 0x97, 0xd0, 0x1a, 0x64, 0x19, 0x71, - 0x2d, 0x97, 0x99, 0xe9, 0xba, 0xd6, 0x2c, 0x60, 0x25, 0x21, 0x13, 0x72, 0x01, 0xf1, 0xa7, 0xce, - 0xc8, 0x32, 0xf5, 0xba, 0xd6, 0xd4, 0x71, 0x24, 0x36, 0x56, 0xa1, 0xb8, 0xeb, 0x1e, 0x79, 0xca, - 0x87, 0xc6, 0xef, 0x69, 0x58, 0x91, 0xb2, 0xf4, 0x12, 0x8d, 0x20, 0x2b, 0x02, 0x8d, 0x1c, 0x5a, - 0x6d, 0xc9, 0xc4, 0xb6, 0x1e, 0x72, 0x74, 0xeb, 0x2e, 0x77, 0xe1, 0xaf, 0x57, 0x1b, 0xb7, 0x6d, - 0x87, 0x4d, 0xc2, 0x61, 0x6b, 0xe4, 0xcd, 
0xda, 0x52, 0xe1, 0x33, 0xc7, 0x53, 0xa7, 0xb6, 0xff, - 0xcc, 0x6e, 0x27, 0x72, 0xd6, 0x7a, 0x2a, 0x6e, 0x63, 0x65, 0x1a, 0xad, 0x43, 0x7e, 0xe6, 0xb8, - 0x03, 0x1e, 0x88, 0x70, 0x5c, 0xc7, 0xb9, 0x99, 0xe3, 0xf2, 0x48, 0x05, 0x65, 0x9d, 0x48, 0x4a, - 0xb9, 0x3e, 0xb3, 0x4e, 0x04, 0xd5, 0x86, 0x82, 0xb0, 0xba, 0x3f, 0xf7, 0x89, 0x99, 0xa9, 0x6b, - 0xcd, 0x52, 0xe7, 0x52, 0xe4, 0x5d, 0x3f, 0x22, 0xf0, 0x42, 0x07, 0xdd, 0x01, 0x10, 0x0f, 0x0e, - 0x28, 0x61, 0xd4, 0x34, 0x44, 0x3c, 0xf1, 0x0d, 0xe9, 0x52, 0x9f, 0x30, 0x95, 0xd6, 0xc2, 0x54, - 0xc9, 0xb4, 0xf1, 0x8b, 0x01, 0xab, 0x32, 0xe5, 0x51, 0xa9, 0x96, 0x1d, 0xd6, 0xde, 0xee, 0x70, - 0x3a, 0xe9, 0xf0, 0x1d, 0x4e, 0xb1, 0xd1, 0x84, 0x04, 0xd4, 0xd4, 0xc5, 0xeb, 0x95, 0x44, 0x36, - 0xf7, 0x24, 0xa9, 0x1c, 0x88, 0x75, 0x51, 0x07, 0xae, 0x70, 0x93, 0x01, 0xa1, 0xde, 0x34, 0x64, - 0x8e, 0xe7, 0x0e, 0x8e, 0x1d, 0x77, 0xec, 0x1d, 0x8b, 0xa0, 0x75, 0x7c, 0x79, 0x66, 0x9d, 0xe0, - 0x98, 0x3b, 0x14, 0x14, 0xba, 0x01, 0x60, 0xd9, 0x76, 0x40, 0x6c, 0x8b, 0x11, 0x19, 0x6b, 0xa9, - 0xb3, 0x12, 0xbd, 0xb6, 0x69, 0xdb, 0x01, 0x5e, 0xe2, 0xd1, 0x97, 0xb0, 0xee, 0x5b, 0x01, 0x73, - 0xac, 0x29, 0x7f, 0x45, 0x54, 0x7e, 0x30, 0x76, 0xa8, 0x35, 0x9c, 0x92, 0xb1, 0x99, 0xad, 0x6b, - 0xcd, 0x3c, 0xbe, 0xaa, 0x14, 0xa2, 0xce, 0xb8, 0xa7, 0x68, 0xf4, 0xdd, 0x19, 0x77, 0x29, 0x0b, - 0x2c, 0x46, 0xec, 0xb9, 0x99, 0x13, 0x65, 0xd9, 0x88, 0x1e, 0xfe, 0x3a, 0x69, 0xa3, 0xaf, 0xd4, - 0xfe, 0x67, 0x3c, 0x22, 0xd0, 0x06, 0x14, 0xe9, 0x33, 0xc7, 0x1f, 0x8c, 0x26, 0xa1, 0xfb, 0x8c, - 0x9a, 0x79, 0xe1, 0x0a, 0x70, 0x68, 0x5b, 0x20, 0xe8, 0x3a, 0x18, 0x13, 0xc7, 0x65, 0xd4, 0x2c, - 0xd4, 0x35, 0x91, 0x50, 0x39, 0x81, 0xad, 0x68, 0x02, 0x5b, 0x9b, 0xee, 0x1c, 0x4b, 0x15, 0x84, - 0x20, 0x43, 0x19, 0xf1, 0x4d, 0x10, 0x69, 0x13, 0x67, 0x54, 0x01, 0x23, 0xb0, 0x5c, 0x9b, 0x98, - 0x45, 0x01, 0x4a, 0x01, 0xdd, 0x82, 0xe2, 0xf3, 0x90, 0x04, 0xf3, 0x81, 0xb4, 0xbd, 0x22, 0x6c, - 0xa3, 0x28, 0x8a, 0xc7, 0x9c, 0xda, 0xe1, 0x0c, 0x86, 0xe7, 0xf1, 0x19, 0xdd, 0x04, 0xa0, 0x13, - 0x2b, 0x18, 0x0f, 0x1c, 0xf7, 0xc8, 0x33, 0x57, 0xc5, 0x9d, 0x45, 0x43, 0x72, 0x46, 0x4c, 0x56, - 0x81, 0x46, 0x47, 0x74, 0x1b, 0xd6, 0x8e, 0x1d, 0x36, 0xf1, 0x42, 0x36, 0x50, 0xf3, 0x38, 0x50, - 0xc3, 0x56, 0xaa, 0xeb, 0xcd, 0x02, 0xae, 0x28, 0x16, 0x4b, 0x52, 0x34, 0x09, 0x6d, 0xfc, 0xa1, - 0x01, 0x2c, 0x5c, 0x10, 0x29, 0x62, 0xc4, 0x1f, 0xcc, 0x9c, 0xe9, 0xd4, 0xa1, 0xaa, 0x1d, 0x81, - 0x43, 0x7b, 0x02, 0x41, 0x75, 0xc8, 0x1c, 0x85, 0xee, 0x48, 0x74, 0x63, 0x71, 0xd1, 0x04, 0xf7, - 0x43, 0x77, 0x84, 0x05, 0x83, 0x6e, 0x40, 0xde, 0x0e, 0xbc, 0xd0, 0x77, 0x5c, 0x5b, 0xf4, 0x54, - 0xb1, 0x53, 0x8e, 0xb4, 0x1e, 0x28, 0x1c, 0xc7, 0x1a, 0xe8, 0x93, 0x28, 0x65, 0x86, 0x50, 0x8d, - 0x37, 0x02, 0xe6, 0xa0, 0xca, 0x60, 0xe3, 0x18, 0x0a, 0x71, 0xc8, 0xc2, 0x45, 0x95, 0x99, 0x31, - 0x39, 0x89, 0x5d, 0x94, 0xfc, 0x98, 0x9c, 0xa0, 0x8f, 0x61, 0x85, 0x79, 0xcc, 0x9a, 0x0e, 0x04, - 0x46, 0xd5, 0xe0, 0x14, 0x05, 0x26, 0xcc, 0x50, 0x54, 0x82, 0xf4, 0x70, 0x2e, 0x56, 0x40, 0x1e, - 0xa7, 0x87, 0x73, 0xbe, 0xea, 0x54, 0xae, 0x32, 0x22, 0x57, 0x4a, 0x6a, 0x54, 0x21, 0xc3, 0x23, - 0xe3, 0xc5, 0x76, 0x2d, 0x35, 0x9e, 0x05, 0x2c, 0xce, 0x8d, 0x0e, 0xe4, 0xa3, 0x78, 0x94, 0x3d, - 0xed, 0x0c, 0x7b, 0x7a, 0xc2, 0xde, 0x06, 0x18, 0x22, 0x30, 0xae, 0x90, 0x48, 0xb1, 0x92, 0x1a, - 0xbf, 0x6a, 0x50, 0x8a, 0xb6, 0x83, 0x5a, 0x9a, 0x4d, 0xc8, 0xc6, 0x5b, 0x9c, 0xa7, 0xa8, 0x14, - 0x77, 0x81, 0x40, 0x77, 0x52, 0x58, 0xf1, 0xa8, 0x0a, 0xb9, 0x63, 0x2b, 0x70, 0x79, 0xe2, 0xc5, - 0xc6, 0xde, 0x49, 0xe1, 0x08, 0x40, 0x37, 0xa2, 0xd6, 0xd6, 0xdf, 
0xde, 0xda, 0x3b, 0x29, 0xd5, - 0xdc, 0x5b, 0x79, 0xc8, 0x06, 0x84, 0x86, 0x53, 0xd6, 0xf8, 0x37, 0x0d, 0x97, 0x44, 0xab, 0xf4, - 0xac, 0xd9, 0x62, 0x65, 0x9d, 0x3b, 0xe2, 0xda, 0x07, 0x8c, 0x78, 0xfa, 0x03, 0x47, 0xbc, 0x02, - 0x06, 0x65, 0x56, 0xc0, 0xd4, 0x7a, 0x97, 0x02, 0x2a, 0x83, 0x4e, 0xdc, 0xb1, 0xda, 0x70, 0xfc, - 0xb8, 0x98, 0x74, 0xe3, 0xdd, 0x93, 0xbe, 0xbc, 0x69, 0xb3, 0xef, 0xb1, 0x69, 0xdf, 0x3e, 0x90, - 0xb9, 0x73, 0x06, 0x32, 0x00, 0xb4, 0x9c, 0x6f, 0xd5, 0x04, 0x15, 0x30, 0x78, 0xd3, 0xc9, 0x7f, - 0x9c, 0x05, 0x2c, 0x05, 0x54, 0x85, 0xbc, 0xaa, 0x2f, 0xef, 0x72, 0x4e, 0xc4, 0xf2, 0x22, 0x42, - 0xfd, 0x9d, 0x11, 0x36, 0x7e, 0xd6, 0xd5, 0xa3, 0x4f, 0xac, 0x69, 0xb8, 0xa8, 0x72, 0x05, 0x0c, - 0xe1, 0xb0, 0x6a, 0x7b, 0x29, 0x9c, 0x5f, 0xfb, 0xf4, 0x07, 0xd4, 0x5e, 0xbf, 0xa8, 0xda, 0x67, - 0xce, 0xa8, 0xbd, 0x71, 0x46, 0xed, 0xb3, 0xef, 0x57, 0xfb, 0xdc, 0x85, 0xd4, 0x3e, 0x7f, 0x4e, - 0xed, 0x43, 0xb8, 0x9c, 0x28, 0x83, 0x2a, 0xfe, 0x1a, 0x64, 0x7f, 0x10, 0x88, 0xaa, 0xbe, 0x92, - 0x2e, 0xaa, 0xfc, 0xd7, 0xbf, 0x87, 0x42, 0xfc, 0x89, 0x83, 0x8a, 0x90, 0x3b, 0xe8, 0x7d, 0xd5, - 0x7b, 0x74, 0xd8, 0x2b, 0xa7, 0x50, 0x01, 0x8c, 0xc7, 0x07, 0x5d, 0xfc, 0x6d, 0x59, 0x43, 0x79, - 0xc8, 0xe0, 0x83, 0x87, 0xdd, 0x72, 0x9a, 0x6b, 0xf4, 0x77, 0xef, 0x75, 0xb7, 0x37, 0x71, 0x59, - 0xe7, 0x1a, 0xfd, 0xfd, 0x47, 0xb8, 0x5b, 0xce, 0x70, 0x1c, 0x77, 0xb7, 0xbb, 0xbb, 0x4f, 0xba, - 0x65, 0x83, 0xe3, 0xf7, 0xba, 0x5b, 0x07, 0x0f, 0xca, 0xd9, 0xeb, 0x5b, 0x90, 0xe1, 0xdf, 0x08, - 0x28, 0x07, 0x3a, 0xde, 0x3c, 0x94, 0x56, 0xb7, 0x1f, 0x1d, 0xf4, 0xf6, 0xcb, 0x1a, 0xc7, 0xfa, - 0x07, 0x7b, 0xe5, 0x34, 0x3f, 0xec, 0xed, 0xf6, 0xca, 0xba, 0x38, 0x6c, 0x7e, 0x23, 0xcd, 0x09, - 0xad, 0x2e, 0x2e, 0x1b, 0x9d, 0x1f, 0xd3, 0x60, 0x08, 0x1f, 0xd1, 0xe7, 0x90, 0x11, 0xff, 0x06, - 0x2e, 0x47, 0x75, 0x58, 0xfa, 0xe2, 0xac, 0x56, 0x92, 0xa0, 0xca, 0xdf, 0x17, 0x90, 0x95, 0xbb, - 0x12, 0x5d, 0x49, 0xee, 0xce, 0xe8, 0xda, 0xda, 0x69, 0x58, 0x5e, 0xbc, 0xa9, 0xa1, 0x6d, 0x80, - 0xc5, 0x34, 0xa2, 0xf5, 0x44, 0xed, 0x97, 0x37, 0x62, 0xb5, 0x7a, 0x16, 0xa5, 0xde, 0xbf, 0x0f, - 0xc5, 0xa5, 0xb2, 0xa2, 0xa4, 0x6a, 0x62, 0xe4, 0xaa, 0xd7, 0xce, 0xe4, 0xa4, 0x9d, 0x4e, 0x0f, - 0x4a, 0xe2, 0x1b, 0x9f, 0xcf, 0x92, 0x4c, 0xc6, 0x5d, 0x28, 0x62, 0x32, 0xf3, 0x18, 0x11, 0x38, - 0x8a, 0xc3, 0x5f, 0xfe, 0x29, 0x50, 0xbd, 0x72, 0x0a, 0x55, 0x3f, 0x19, 0x52, 0x5b, 0x9f, 0xbe, - 0xf8, 0xa7, 0x96, 0x7a, 0xf1, 0xba, 0xa6, 0xbd, 0x7c, 0x5d, 0xd3, 0xfe, 0x7e, 0x5d, 0xd3, 0x7e, - 0x7b, 0x53, 0x4b, 0xbd, 0x7c, 0x53, 0x4b, 0xfd, 0xf9, 0xa6, 0x96, 0x7a, 0x9a, 0x53, 0xbf, 0x5a, - 0x86, 0x59, 0xd1, 0x33, 0xb7, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x14, 0xa2, 0x0f, 0x1f, - 0x0d, 0x00, 0x00, + // 1351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x16, 0x45, 0x51, 0x1f, 0x23, 0x5b, 0x51, 0x36, 0x8a, 0x43, 0x2b, 0x80, 0xac, 0x57, 0x2f, + 0x0a, 0x08, 0x41, 0x2a, 0xa5, 0x4a, 0x10, 0xa0, 0x45, 0x2e, 0xb6, 0xa3, 0xc4, 0x46, 0x63, 0xa5, + 0x59, 0xd9, 0x71, 0x9b, 0xa2, 0x10, 0x28, 0x69, 0x4d, 0x11, 0xa1, 0x48, 0x86, 0xbb, 0xac, 0xad, + 0x6b, 0xdb, 0x73, 0x51, 0xf4, 0xda, 0x5b, 0x7e, 0x4d, 0x8e, 0x39, 0x16, 0x3d, 0x04, 0x6d, 0xf2, + 0x47, 0x8a, 0xfd, 0xa0, 0x24, 0xba, 0xce, 0x17, 0xe2, 0x8b, 0xb1, 0xf3, 0x3c, 0xb3, 0xb3, 0xb3, + 0xb3, 0xcf, 0x8c, 0x45, 0xb8, 0x42, 0x99, 0x1f, 0x92, 0xb6, 0xf8, 0x1b, 0x0c, 0xdb, 0x61, 0x30, + 0x6a, 0x05, 0xa1, 0xcf, 0x7c, 0x94, 0x65, 0x13, 0xcb, 0xf3, 0x69, 0x75, 0x3d, 0xe9, 0xc0, 0x66, + 0x01, 0xa1, 0xd2, 0xa5, 
0x5a, 0xb1, 0x7d, 0xdb, 0x17, 0xcb, 0x36, 0x5f, 0x29, 0xb4, 0x9e, 0xdc, + 0x10, 0x84, 0xfe, 0xf4, 0xd4, 0x3e, 0x15, 0xd2, 0xb5, 0x86, 0xc4, 0x3d, 0x4d, 0xd9, 0xbe, 0x6f, + 0xbb, 0xa4, 0x2d, 0xac, 0x61, 0x74, 0xd4, 0xb6, 0xbc, 0x99, 0xa4, 0x1a, 0x17, 0x60, 0xf5, 0x30, + 0x74, 0x18, 0xc1, 0x84, 0x06, 0xbe, 0x47, 0x49, 0xe3, 0x67, 0x0d, 0x56, 0x14, 0xf2, 0x2c, 0x22, + 0x94, 0xa1, 0x4d, 0x00, 0xe6, 0x4c, 0x09, 0x25, 0xa1, 0x43, 0xa8, 0xa9, 0xd5, 0xf5, 0x66, 0xb1, + 0x73, 0x95, 0xef, 0x9e, 0x12, 0x36, 0x21, 0x11, 0x1d, 0x8c, 0xfc, 0x60, 0xd6, 0xda, 0x77, 0xa6, + 0xa4, 0x2f, 0x5c, 0xb6, 0x32, 0x2f, 0x5e, 0x6d, 0xa4, 0xf0, 0xd2, 0x26, 0xb4, 0x06, 0x59, 0x46, + 0x3c, 0xcb, 0x63, 0x66, 0xba, 0xae, 0x35, 0x0b, 0x58, 0x59, 0xc8, 0x84, 0x5c, 0x48, 0x02, 0xd7, + 0x19, 0x59, 0xa6, 0x5e, 0xd7, 0x9a, 0x3a, 0x8e, 0xcd, 0xc6, 0x2a, 0x14, 0x77, 0xbd, 0x23, 0x5f, + 0xe5, 0xd0, 0xf8, 0x3d, 0x0d, 0x2b, 0xd2, 0x96, 0x59, 0xa2, 0x11, 0x64, 0xc5, 0x45, 0xe3, 0x84, + 0x56, 0x5b, 0xb2, 0xb0, 0xad, 0x07, 0x1c, 0xdd, 0xba, 0xc3, 0x53, 0xf8, 0xeb, 0xd5, 0xc6, 0x2d, + 0xdb, 0x61, 0x93, 0x68, 0xd8, 0x1a, 0xf9, 0xd3, 0xb6, 0x74, 0xf8, 0xdc, 0xf1, 0xd5, 0xaa, 0x1d, + 0x3c, 0xb5, 0xdb, 0x89, 0x9a, 0xb5, 0x9e, 0x88, 0xdd, 0x58, 0x85, 0x46, 0xeb, 0x90, 0x9f, 0x3a, + 0xde, 0x80, 0x5f, 0x44, 0x24, 0xae, 0xe3, 0xdc, 0xd4, 0xf1, 0xf8, 0x4d, 0x05, 0x65, 0x9d, 0x48, + 0x4a, 0xa5, 0x3e, 0xb5, 0x4e, 0x04, 0xd5, 0x86, 0x82, 0x88, 0xba, 0x3f, 0x0b, 0x88, 0x99, 0xa9, + 0x6b, 0xcd, 0x52, 0xe7, 0x62, 0x9c, 0x5d, 0x3f, 0x26, 0xf0, 0xc2, 0x07, 0xdd, 0x06, 0x10, 0x07, + 0x0e, 0x28, 0x61, 0xd4, 0x34, 0xc4, 0x7d, 0xe6, 0x3b, 0x64, 0x4a, 0x7d, 0xc2, 0x54, 0x59, 0x0b, + 0xae, 0xb2, 0x69, 0xe3, 0xb9, 0x01, 0xab, 0xb2, 0xe4, 0xf1, 0x53, 0x2d, 0x27, 0xac, 0xbd, 0x3d, + 0xe1, 0x74, 0x32, 0xe1, 0xdb, 0x9c, 0x62, 0xa3, 0x09, 0x09, 0xa9, 0xa9, 0x8b, 0xd3, 0x2b, 0x89, + 0x6a, 0xee, 0x49, 0x52, 0x25, 0x30, 0xf7, 0x45, 0x1d, 0xb8, 0xcc, 0x43, 0x86, 0x84, 0xfa, 0x6e, + 0xc4, 0x1c, 0xdf, 0x1b, 0x1c, 0x3b, 0xde, 0xd8, 0x3f, 0x16, 0x97, 0xd6, 0xf1, 0xa5, 0xa9, 0x75, + 0x82, 0xe7, 0xdc, 0xa1, 0xa0, 0xd0, 0x75, 0x00, 0xcb, 0xb6, 0x43, 0x62, 0x5b, 0x8c, 0xc8, 0xbb, + 0x96, 0x3a, 0x2b, 0xf1, 0x69, 0x9b, 0xb6, 0x1d, 0xe2, 0x25, 0x1e, 0x7d, 0x05, 0xeb, 0x81, 0x15, + 0x32, 0xc7, 0x72, 0xf9, 0x29, 0xe2, 0xe5, 0x07, 0x63, 0x87, 0x5a, 0x43, 0x97, 0x8c, 0xcd, 0x6c, + 0x5d, 0x6b, 0xe6, 0xf1, 0x15, 0xe5, 0x10, 0x2b, 0xe3, 0xae, 0xa2, 0xd1, 0xf7, 0x67, 0xec, 0xa5, + 0x2c, 0xb4, 0x18, 0xb1, 0x67, 0x66, 0x4e, 0x3c, 0xcb, 0x46, 0x7c, 0xf0, 0x37, 0xc9, 0x18, 0x7d, + 0xe5, 0xf6, 0x9f, 0xe0, 0x31, 0x81, 0x36, 0xa0, 0x48, 0x9f, 0x3a, 0xc1, 0x60, 0x34, 0x89, 0xbc, + 0xa7, 0xd4, 0xcc, 0x8b, 0x54, 0x80, 0x43, 0xdb, 0x02, 0x41, 0xd7, 0xc0, 0x98, 0x38, 0x1e, 0xa3, + 0x66, 0xa1, 0xae, 0x89, 0x82, 0xca, 0x0e, 0x6c, 0xc5, 0x1d, 0xd8, 0xda, 0xf4, 0x66, 0x58, 0xba, + 0x20, 0x04, 0x19, 0xca, 0x48, 0x60, 0x82, 0x28, 0x9b, 0x58, 0xa3, 0x0a, 0x18, 0xa1, 0xe5, 0xd9, + 0xc4, 0x2c, 0x0a, 0x50, 0x1a, 0xe8, 0x26, 0x14, 0x9f, 0x45, 0x24, 0x9c, 0x0d, 0x64, 0xec, 0x15, + 0x11, 0x1b, 0xc5, 0xb7, 0x78, 0xc4, 0xa9, 0x1d, 0xce, 0x60, 0x78, 0x36, 0x5f, 0xa3, 0x1b, 0x00, + 0x74, 0x62, 0x85, 0xe3, 0x81, 0xe3, 0x1d, 0xf9, 0xe6, 0xaa, 0xd8, 0xb3, 0x10, 0x24, 0x67, 0x44, + 0x67, 0x15, 0x68, 0xbc, 0x44, 0xb7, 0x60, 0xed, 0xd8, 0x61, 0x13, 0x3f, 0x62, 0x03, 0xd5, 0x8f, + 0x03, 0xd5, 0x6c, 0xa5, 0xba, 0xde, 0x2c, 0xe0, 0x8a, 0x62, 0xb1, 0x24, 0x1f, 0xc8, 0x6e, 0xa9, + 0x80, 0xe1, 0x3a, 0x53, 0x87, 0x99, 0x17, 0x64, 0xca, 0xc2, 0x68, 0x3c, 0xd7, 0x00, 0x16, 0x89, + 0x89, 0xc2, 0x31, 0x12, 0x0c, 0xa6, 0x8e, 0xeb, 
0x3a, 0x54, 0x89, 0x14, 0x38, 0xb4, 0x27, 0x10, + 0x54, 0x87, 0xcc, 0x51, 0xe4, 0x8d, 0x84, 0x46, 0x8b, 0x0b, 0x69, 0xdc, 0x8b, 0xbc, 0x11, 0x16, + 0x0c, 0xba, 0x0e, 0x79, 0x3b, 0xf4, 0xa3, 0xc0, 0xf1, 0x6c, 0xa1, 0xb4, 0x62, 0xa7, 0x1c, 0x7b, + 0xdd, 0x57, 0x38, 0x9e, 0x7b, 0xa0, 0xff, 0xc7, 0x85, 0x34, 0x84, 0xeb, 0x7c, 0x4e, 0x60, 0x0e, + 0xaa, 0xba, 0x36, 0x8e, 0xa1, 0x30, 0x2f, 0x84, 0x48, 0x51, 0xd5, 0x6b, 0x4c, 0x4e, 0xe6, 0x29, + 0x4a, 0x7e, 0x4c, 0x4e, 0xd0, 0xff, 0x60, 0x85, 0xf9, 0xcc, 0x72, 0x07, 0x02, 0xa3, 0xaa, 0x9d, + 0x8a, 0x02, 0x13, 0x61, 0x28, 0x2a, 0x41, 0x7a, 0x38, 0x13, 0x83, 0x21, 0x8f, 0xd3, 0xc3, 0x19, + 0x1f, 0x80, 0xaa, 0x82, 0x19, 0x51, 0x41, 0x65, 0x35, 0xaa, 0x90, 0xe1, 0x37, 0xe3, 0x12, 0xf0, + 0x2c, 0xd5, 0xb4, 0x05, 0x2c, 0xd6, 0x8d, 0x0e, 0xe4, 0xe3, 0xfb, 0xa8, 0x78, 0xda, 0x19, 0xf1, + 0xf4, 0x44, 0xbc, 0x0d, 0x30, 0xc4, 0xc5, 0xb8, 0x43, 0xa2, 0xc4, 0xca, 0x6a, 0xfc, 0xaa, 0x41, + 0x29, 0x9e, 0x19, 0x6a, 0x94, 0x36, 0x21, 0x3b, 0x9f, 0xed, 0xbc, 0x44, 0xa5, 0xb9, 0x36, 0x04, + 0xba, 0x93, 0xc2, 0x8a, 0x47, 0x55, 0xc8, 0x1d, 0x5b, 0xa1, 0xc7, 0x0b, 0x2f, 0xe6, 0xf8, 0x4e, + 0x0a, 0xc7, 0x00, 0xba, 0x1e, 0x0b, 0x5e, 0x7f, 0xbb, 0xe0, 0x77, 0x52, 0x4a, 0xf2, 0x5b, 0x79, + 0xc8, 0x86, 0x84, 0x46, 0x2e, 0x6b, 0xfc, 0xa2, 0xc3, 0x45, 0x21, 0xa0, 0x9e, 0x35, 0x5d, 0x0c, + 0xb2, 0x77, 0x36, 0xbe, 0xf6, 0x09, 0x8d, 0x9f, 0xfe, 0xc4, 0xc6, 0xaf, 0x80, 0x41, 0x99, 0x15, + 0x32, 0x35, 0xf4, 0xa5, 0x81, 0xca, 0xa0, 0x13, 0x6f, 0xac, 0xe6, 0x1e, 0x5f, 0x2e, 0xfa, 0xdf, + 0x78, 0x7f, 0xff, 0x2f, 0xcf, 0xdf, 0xec, 0x47, 0xcc, 0xdf, 0xb7, 0xb7, 0x69, 0xee, 0x43, 0xda, + 0x34, 0xbf, 0xdc, 0xa6, 0x21, 0xa0, 0xe5, 0x57, 0x50, 0xd2, 0xa8, 0x80, 0xc1, 0xa5, 0x28, 0xff, + 0xc9, 0x16, 0xb0, 0x34, 0x50, 0x15, 0xf2, 0xea, 0xd5, 0xb9, 0xf6, 0x39, 0x31, 0xb7, 0x17, 0xf7, + 0xd6, 0xdf, 0x7b, 0xef, 0xc6, 0x1f, 0xba, 0x3a, 0xf4, 0xb1, 0xe5, 0x46, 0x8b, 0xb7, 0xe7, 0x09, + 0x72, 0x54, 0x35, 0x83, 0x34, 0xde, 0xad, 0x88, 0xf4, 0x27, 0x28, 0x42, 0x3f, 0x2f, 0x45, 0x64, + 0xce, 0x50, 0x84, 0x71, 0x86, 0x22, 0xb2, 0x1f, 0xa7, 0x88, 0xdc, 0xb9, 0x28, 0x22, 0xff, 0x21, + 0x8a, 0x28, 0x2c, 0x2b, 0x22, 0x82, 0x4b, 0x89, 0xc7, 0x51, 0x92, 0x58, 0x83, 0xec, 0x8f, 0x02, + 0x51, 0x9a, 0x50, 0xd6, 0x79, 0x89, 0xe2, 0xda, 0x0f, 0x50, 0x98, 0xff, 0x48, 0x42, 0x45, 0xc8, + 0x1d, 0xf4, 0xbe, 0xee, 0x3d, 0x3c, 0xec, 0x95, 0x53, 0xa8, 0x00, 0xc6, 0xa3, 0x83, 0x2e, 0xfe, + 0xae, 0xac, 0xa1, 0x3c, 0x64, 0xf0, 0xc1, 0x83, 0x6e, 0x39, 0xcd, 0x3d, 0xfa, 0xbb, 0x77, 0xbb, + 0xdb, 0x9b, 0xb8, 0xac, 0x73, 0x8f, 0xfe, 0xfe, 0x43, 0xdc, 0x2d, 0x67, 0x38, 0x8e, 0xbb, 0xdb, + 0xdd, 0xdd, 0xc7, 0xdd, 0xb2, 0xc1, 0xf1, 0xbb, 0xdd, 0xad, 0x83, 0xfb, 0xe5, 0xec, 0xb5, 0x2d, + 0xc8, 0xf0, 0x5f, 0x19, 0x28, 0x07, 0x3a, 0xde, 0x3c, 0x94, 0x51, 0xb7, 0x1f, 0x1e, 0xf4, 0xf6, + 0xcb, 0x1a, 0xc7, 0xfa, 0x07, 0x7b, 0xe5, 0x34, 0x5f, 0xec, 0xed, 0xf6, 0xca, 0xba, 0x58, 0x6c, + 0x7e, 0x2b, 0xc3, 0x09, 0xaf, 0x2e, 0x2e, 0x1b, 0x9d, 0x9f, 0xd2, 0x60, 0x88, 0x1c, 0xd1, 0x17, + 0x90, 0x11, 0xff, 0x32, 0x2e, 0xc5, 0xaf, 0xb3, 0xf4, 0x9b, 0xb5, 0x5a, 0x49, 0x82, 0xaa, 0x7e, + 0x5f, 0x42, 0x56, 0xce, 0x55, 0x74, 0x39, 0x39, 0x67, 0xe3, 0x6d, 0x6b, 0xa7, 0x61, 0xb9, 0xf1, + 0x86, 0x86, 0xb6, 0x01, 0x16, 0x3d, 0x8a, 0xd6, 0x13, 0x8a, 0x58, 0x9e, 0x9e, 0xd5, 0xea, 0x59, + 0x94, 0x3a, 0xff, 0x1e, 0x14, 0x97, 0x9e, 0x15, 0x25, 0x5d, 0x13, 0x8d, 0x58, 0xbd, 0x7a, 0x26, + 0x27, 0xe3, 0x74, 0x7a, 0x50, 0x12, 0x5f, 0x09, 0xbc, 0xc3, 0x64, 0x31, 0xee, 0x40, 0x11, 0x93, + 0xa9, 0xcf, 0x88, 0xc0, 0xd1, 0xfc, 0xfa, 0xcb, 0x1f, 0x13, 0xd5, 0xcb, 
0xa7, 0x50, 0xf5, 0xd1, + 0x91, 0xda, 0xfa, 0xec, 0xc5, 0x3f, 0xb5, 0xd4, 0x8b, 0xd7, 0x35, 0xed, 0xe5, 0xeb, 0x9a, 0xf6, + 0xf7, 0xeb, 0x9a, 0xf6, 0xdb, 0x9b, 0x5a, 0xea, 0xe5, 0x9b, 0x5a, 0xea, 0xcf, 0x37, 0xb5, 0xd4, + 0x93, 0x9c, 0xfa, 0xee, 0x19, 0x66, 0x85, 0x66, 0x6e, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x22, + 0xcb, 0x99, 0x7e, 0x61, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1436,6 +1443,11 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x78 + } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -1890,6 +1902,11 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x40 + } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -2026,6 +2043,11 @@ func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Limit != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x48 + } if len(m.WithoutReplicaLabels) > 0 { for iNdEx := len(m.WithoutReplicaLabels) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.WithoutReplicaLabels[iNdEx]) @@ -2291,6 +2313,9 @@ func (m *SeriesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } return n } @@ -2465,6 +2490,9 @@ func (m *LabelNamesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } return n } @@ -2531,6 +2559,9 @@ func (m *LabelValuesRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } return n } @@ -3401,6 +3432,25 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -4352,6 +4402,25 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -4763,6 +4832,25 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } m.WithoutReplicaLabels = append(m.WithoutReplicaLabels, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) 
+ } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto index 2a9e9e3eaf..a15e5b6f8e 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto @@ -137,6 +137,9 @@ message SeriesRequest { // NOTE(bwplotka): thanos.info.store.supports_without_replica_labels field has to return true to let client knows // server supports it. repeated string without_replica_labels = 14; + + // limit is used to limit the number of results returned + int64 limit = 15; } // QueryHints represents hints from PromQL that might help to @@ -235,6 +238,9 @@ message LabelNamesRequest { // same as in series request. repeated string without_replica_labels = 7; + + // limit is used to limit the number of results returned + int64 limit = 8; } message LabelNamesResponse { @@ -268,6 +274,9 @@ message LabelValuesRequest { // same as in series request. repeated string without_replica_labels = 8; + + // limit is used to limit the number of results returned + int64 limit = 9; } message LabelValuesResponse { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index b19c131b54..6dd18af0a8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -220,7 +220,12 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, seriesSrv storepb.Store_Ser defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb chunk querier series") } - set := q.Select(srv.Context(), true, nil, matchers...) + hints := &storage.SelectHints{ + Start: r.MinTime, + End: r.MaxTime, + Limit: int(r.Limit), + } + set := q.Select(srv.Context(), true, hints, matchers...) shardMatcher := r.ShardInfo.Matcher(&s.buffers) defer shardMatcher.Close() @@ -328,7 +333,10 @@ func (s *TSDBStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest } defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names") - res, _, err := q.LabelNames(ctx, nil, matchers...) + hints := &storage.LabelHints{ + Limit: int(r.Limit), + } + res, _, err := q.LabelNames(ctx, hints, matchers...) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } @@ -396,6 +404,7 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque Start: r.Start, End: r.End, Func: "series", + Limit: int(r.Limit), } set := q.Select(ctx, false, hints, matchers...) @@ -405,7 +414,10 @@ func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesReque return &storepb.LabelValuesResponse{}, nil } - res, _, err := q.LabelValues(ctx, r.Label, nil, matchers...) + hints := &storage.LabelHints{ + Limit: int(r.Limit), + } + res, _, err := q.LabelValues(ctx, r.Label, hints, matchers...) 
if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go index 88f1309f30..b05991e172 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go @@ -5,7 +5,7 @@ package b3 // import "go.opentelemetry.io/contrib/propagators/b3" // Version is the current release version of the B3 propagator. func Version() string { - return "1.28.0" + return "1.29.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go index a64230ebaf..eb220fbfe6 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go @@ -5,7 +5,7 @@ package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" // Version is the current release version of the Jaeger propagator. func Version() string { - return "1.28.0" + return "1.29.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go b/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go index 3c726b1896..3c2eca9865 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/ot/version.go @@ -5,7 +5,7 @@ package ot // import "go.opentelemetry.io/contrib/propagators/ot" // Version is the current release version of the ot propagator. func Version() string { - return "1.28.0" + return "1.29.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index e783b57ac4..b7bd429ffd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. 
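Editor's note (sketch): the storepb and TSDBStore hunks above add an optional `limit` field to SeriesRequest (field 15), LabelNamesRequest (field 8), and LabelValuesRequest (field 9), and forward it to the Prometheus querier as storage.SelectHints / storage.LabelHints. The snippet below only illustrates how the new field flows from a request into the hints, assuming a module that vendors these packages at the versions in this diff; the time range, label name, and limit values are made up, and the gRPC plumbing is omitted. Because the field is only written to the wire when non-zero, a zero limit effectively means "no limit".

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/thanos-io/thanos/pkg/store/storepb"
)

func main() {
	// Client side: cap the number of results. The time range and label name
	// are illustrative values only.
	seriesReq := &storepb.SeriesRequest{MinTime: 0, MaxTime: 3600000, Limit: 100}
	valuesReq := &storepb.LabelValuesRequest{Label: "instance", Limit: 100}

	// Server side: translate the request limits into storage hints, mirroring
	// what TSDBStore.Series / LabelNames / LabelValues now do.
	selectHints := &storage.SelectHints{
		Start: seriesReq.MinTime,
		End:   seriesReq.MaxTime,
		Limit: int(seriesReq.Limit),
	}
	labelHints := &storage.LabelHints{Limit: int(valuesReq.Limit)}

	fmt.Println("select limit:", selectHints.Limit, "label limit:", labelHints.Limit)
}
```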
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 9513c0a57c..4abf48d1f6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. 
nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 34306ddd39..0b6bfaff80 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -46,7 +46,6 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/48226 // - https://github.com/golang/go/issues/56633 // - https://github.com/golang/go/issues/56677 // - https://github.com/golang/go/issues/58726 @@ -76,7 +75,7 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax // NeedTypesInfo adds TypesInfo. @@ -961,12 +960,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -1499,6 +1500,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b72..df14ffd94d 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. 
+ mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index d648c3d071..9ada177758 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying} [EKPRUTrC] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,7 +63,7 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTrC]; +// - The TT operators are encoded as [EKPRUTrCa]; // two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; @@ -106,6 +106,7 @@ const ( opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators opAt = 'A' // .At(i) (Tuple) @@ -279,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() + if alias, ok := T.(*aliases.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + return Path(r), nil + } - if tname.IsAlias() { - // type alias + } else if tname.IsAlias() { + // legacy alias if r := find(obj, T, path, nil); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { - // generic named type - return Path(r), nil - } - } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { return Path(r), nil } } @@ -657,6 +663,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*aliases.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. 
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go index c027b9f315..6652f7db0f 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -15,10 +15,14 @@ import ( // It will never be created by go/types. type Alias struct{} -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } +func (*Alias) String() string { panic("unreachable") } +func (*Alias) Underlying() types.Type { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } +func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } +func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } +func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } +func Origin(alias *Alias) *Alias { panic("unreachable") } // Unalias returns the type t for go <=1.21. func Unalias(t types.Type) types.Type { return t } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b329954841..3ef1afeb40 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -28,6 +28,42 @@ func Rhs(alias *Alias) types.Type { return Unalias(alias) } +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. +func TypeArgs(alias *Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) +} + +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *Alias) *Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) +} + // Unalias is a wrapper of types.Unalias. 
func Unalias(t types.Type) types.Type { return types.Unalias(t) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 2acd85851e..b92e8e6eb3 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -23,9 +23,6 @@ type PkgDecoder struct { // version is the file format version. version uint32 - // aliases determines whether types.Aliases should be created - aliases bool - // sync indicates whether the file uses sync markers. sync bool @@ -76,7 +73,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, - //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go new file mode 100644 index 0000000000..179063d484 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import "go/build/constraint" + +// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). +// Otherwise nil. +// +// Deprecate once x/tools is after go1.21. +var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go new file mode 100644 index 0000000000..38011407d5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package versions + +import "go/build/constraint" + +func init() { + ConstraintGoVersion = constraint.GoVersion +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a..fe19e8f97a 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1024,6 +1024,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. 
Complements RPCs that use the annotations in @@ -1033,15 +1040,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1055,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02a..a462e7d013 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. 
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 
+319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df33..ffb5838cb1 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. 
// -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd..b5db279aeb 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. 
See diff --git a/vendor/modules.txt b/vendor/modules.txt index b6d0989366..44fdab088c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -341,7 +341,7 @@ github.com/facette/natsort # github.com/fatih/color v1.16.0 ## explicit; go 1.17 github.com/fatih/color -# github.com/felixge/fgprof v0.9.4 +# github.com/felixge/fgprof v0.9.5 ## explicit; go 1.14 github.com/felixge/fgprof # github.com/felixge/httpsnoop v1.0.4 @@ -489,8 +489,8 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/s2a-go v0.1.7 ## explicit; go 1.19 @@ -542,8 +542,8 @@ github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 ## explicit; go 1.19 github.com/grpc-ecosystem/go-grpc-middleware/v2 -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 -## explicit; go 1.20 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 +## explicit; go 1.21 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -659,7 +659,7 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a ## explicit; go 1.14 github.com/metalmatze/signal/server/signalhttp -# github.com/miekg/dns v1.1.61 +# github.com/miekg/dns v1.1.62 ## explicit; go 1.19 github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 @@ -790,10 +790,12 @@ github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/timeinterval github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.2 ## explicit; go 1.20 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/collectors/version @@ -807,7 +809,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 +# github.com/prometheus/common v0.58.0 ## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -967,7 +969,7 @@ github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus -# github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 +# github.com/thanos-io/thanos v0.35.2-0.20240904173945-09db52562de0 ## explicit; go 1.22.0 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block @@ -1159,19 +1161,19 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 +# go.opentelemetry.io/contrib/propagators/autoprop v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/autoprop # go.opentelemetry.io/contrib/propagators/aws v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/aws/xray -# go.opentelemetry.io/contrib/propagators/b3 v1.28.0 +# go.opentelemetry.io/contrib/propagators/b3 v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/b3 -# go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 +# go.opentelemetry.io/contrib/propagators/jaeger v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/jaeger -# go.opentelemetry.io/contrib/propagators/ot v1.28.0 +# go.opentelemetry.io/contrib/propagators/ot v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/ot # go.opentelemetry.io/otel v1.29.0 @@ -1192,7 +1194,7 @@ go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/bridge/opentracing v1.28.0 +# go.opentelemetry.io/otel/bridge/opentracing v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration @@ -1200,7 +1202,7 @@ go.opentelemetry.io/otel/bridge/opentracing/migration ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal @@ -1277,7 +1279,7 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.19.0 +# golang.org/x/mod v0.20.0 ## explicit; go 1.18 golang.org/x/mod/semver # golang.org/x/net v0.28.0 @@ -1298,7 +1300,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.22.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1337,7 +1339,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.6.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.23.0 +# golang.org/x/tools v0.24.0 ## explicit; go 1.19 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/packages @@ -1382,13 +1384,13 @@ google.golang.org/api/transport/http/internal/propagation ## explicit; go 1.20 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd +## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails 
google.golang.org/genproto/googleapis/rpc/status
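Editor's note (sketch): the otlptracegrpc envconfig hunk earlier in this diff stops percent-decoding header keys parsed from the OTLP headers environment variables (e.g. OTEL_EXPORTER_OTLP_HEADERS); keys are now whitespace-trimmed and validated against HTTP token characters, while values are still unescaped. The helpers live in an internal package, so the standalone reproduction below exists purely to illustrate which keys pass the new check; the sample keys are made up.

```go
package main

import (
	"fmt"
	"unicode"
)

// isTokenChar mirrors the vendored helper: a header key may contain only
// ASCII letters, digits, and the RFC 7230 token punctuation.
func isTokenChar(c rune) bool {
	return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
		unicode.IsDigit(c) ||
		c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
		c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}

// isValidHeaderKey mirrors the vendored helper: empty keys and keys with
// non-token characters are rejected (the header is then dropped and an
// error is logged by the caller).
func isValidHeaderKey(key string) bool {
	if key == "" {
		return false
	}
	for _, c := range key {
		if !isTokenChar(c) {
			return false
		}
	}
	return true
}

func main() {
	// "api-key" is accepted; "api key" (space) and "x-tenant-ид" (non-ASCII)
	// are rejected. "api%2Dkey" stays valid but is now kept literally,
	// since keys are no longer percent-decoded.
	for _, k := range []string{"api-key", "api key", "x-tenant-ид", "api%2Dkey"} {
		fmt.Printf("%q -> %v\n", k, isValidHeaderKey(k))
	}
}
```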