@@ -85,7 +85,7 @@ func init() {
 }
 
 type Server struct {
-	ResponseMutext sync.RWMutex
+	ResponseMutex sync.RWMutex
 
 	// Responses has the last response for each node keyed by the node name
 	Responses map[string]*api.ListMetricsResponse `json:"responses" yaml:"responses"`
@@ -106,7 +106,7 @@ type Server struct {
 	grpcServer *grpc.Server
 }
 
-func (c *Server) Collect(ch chan<- prometheus.Metric, metrics map[string]*api.ListMetricsResponse) {
+func (s *Server) Collect(ch chan<- prometheus.Metric, metrics map[string]*api.ListMetricsResponse) {
 	responseAgeSeconds.Reset()
 	for _, v := range metrics {
 		responseAgeSeconds.WithLabelValues(v.NodeName, v.PodName, v.Reason).Set(
@@ -246,7 +246,7 @@ func (s *Server) Check(ctx context.Context, _ *grpc_health_v1.HealthCheckRequest
 	return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
 }
 
-// Ready returns success if the service should be accepting traffic
+// IsReady returns success if the service should be accepting traffic
 func (s *Server) IsReady(context.Context, *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
 	if !s.IsReadyResult.Load() {
 		return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_NOT_SERVING}, fmt.Errorf("not-ready")
@@ -317,7 +317,7 @@ func (s *Server) PushMetrics(req api.MetricsCollector_PushMetricsServer) error {
 
 // GetContainerUsageSummary maps containers to their cached metric values. metrics are passed as an argument to
 // reduce lock contention.
-func (c *Server) GetContainerUsageSummary(metrics map[string]*api.ListMetricsResponse) map[sampler.ContainerKey]*api.ContainerMetrics {
+func (s *Server) GetContainerUsageSummary(metrics map[string]*api.ListMetricsResponse) map[sampler.ContainerKey]*api.ContainerMetrics {
 	// Transform map of node -> utilization to map of container -> utilization by pulling the containers
 	// out of each node response
 	var values = map[sampler.ContainerKey]*api.ContainerMetrics{}
@@ -373,8 +373,8 @@ func (s *Server) GetMetrics() map[string]*api.ListMetricsResponse {
 func (s *Server) GetNodeNames() sets.String {
 	nodes := sets.NewString()
 	func() {
-		s.ResponseMutext.Lock()
-		defer s.ResponseMutext.Unlock()
+		s.ResponseMutex.Lock()
+		defer s.ResponseMutex.Unlock()
 		for k, v := range s.Responses {
 			if time.Since(v.Timestamp.AsTime()) > s.ttl {
 				continue
@@ -389,8 +389,8 @@ func (s *Server) GetNodeNames() sets.String {
 func (s *Server) getMetrics(filterExpired bool) map[string]*api.ListMetricsResponse {
 	values := make(map[string]*api.ListMetricsResponse, len(s.Responses))
 	func() {
-		s.ResponseMutext.Lock()
-		defer s.ResponseMutext.Unlock()
+		s.ResponseMutex.Lock()
+		defer s.ResponseMutex.Unlock()
 		for k, v := range s.Responses {
 			values[k] = v
 		}
@@ -411,14 +411,14 @@ func (s *Server) getMetrics(filterExpired bool) map[string]*api.ListMetricsRespo
 
 // ClearMetrics deletes the metrics with the given key from the cache
 func (s *Server) ClearMetrics(key string) {
-	s.ResponseMutext.Lock()
-	defer s.ResponseMutext.Unlock()
+	s.ResponseMutex.Lock()
+	defer s.ResponseMutex.Unlock()
 	delete(s.Responses, key)
 }
 
 // CacheMetrics caches msg
 func (s *Server) CacheMetrics(msg *api.ListMetricsResponse) {
-	s.ResponseMutext.Lock()
-	defer s.ResponseMutext.Unlock()
+	s.ResponseMutex.Lock()
+	defer s.ResponseMutex.Unlock()
 	s.Responses[msg.NodeName] = msg
 }