
Commit b1443be

Ran linter
1 parent 9c3766f commit b1443be

File tree: 7 files changed, +517 −301 lines changed

exporter/opentelemetry-exporter-prometheus-remote-write/examples/sampleapp.py (+5 −6)
@@ -7,15 +7,14 @@
 import psutil
 
 from opentelemetry import metrics
-
+from opentelemetry.exporter.prometheus_remote_write import (
+    PrometheusRemoteWriteMetricsExporter,
+)
 from opentelemetry.metrics import (
     Observation,
     get_meter_provider,
     set_meter_provider,
 )
-from opentelemetry.exporter.prometheus_remote_write import (
-    PrometheusRemoteWriteMetricsExporter,
-)
 from opentelemetry.sdk.metrics import MeterProvider
 from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
 
@@ -29,7 +28,7 @@
     endpoint="http://cortex:9009/api/prom/push",
     headers={"X-Scope-Org-ID": "5"},
 )
-reader = PeriodicExportingMetricReader(exporter,1000)
+reader = PeriodicExportingMetricReader(exporter, 1000)
 provider = MeterProvider(metric_readers=[reader])
 metrics.set_meter_provider(provider)
 meter = metrics.get_meter(__name__)
@@ -98,7 +97,7 @@ def get_ram_usage_callback(observer):
         # updown counter
         requests_active.add(num % 7231 + 200, testing_labels)
 
-        request_latency.record(num % 92,testing_labels)
+        request_latency.record(num % 92, testing_labels)
         logger.log(level=INFO, msg="completed metrics collection cycle")
         time.sleep(1)
         num += 9791
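
For orientation, this is the pipeline the example builds, as a minimal runnable sketch. The endpoint, headers, and 1000 ms export interval are the sample's own values; the `requests_total` counter at the end is a hypothetical instrument added purely for illustration.

# Sketch: wire the remote write exporter into a periodic reader (values from the diff above).
from opentelemetry import metrics
from opentelemetry.exporter.prometheus_remote_write import (
    PrometheusRemoteWriteMetricsExporter,
)
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

exporter = PrometheusRemoteWriteMetricsExporter(
    endpoint="http://cortex:9009/api/prom/push",
    headers={"X-Scope-Org-ID": "5"},
)
# Second positional argument is export_interval_millis: push every second.
reader = PeriodicExportingMetricReader(exporter, 1000)
metrics.set_meter_provider(MeterProvider(metric_readers=[reader]))
meter = metrics.get_meter(__name__)

# Hypothetical instrument: anything recorded on this meter flows through the exporter.
requests_total = meter.create_counter("requests_total")
requests_total.add(1, {"path": "/"})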

exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py (+65 −58)
@@ -14,10 +14,10 @@
 
 import logging
 import re
-from typing import Dict, Sequence
-
 from collections import defaultdict
 from itertools import chain
+from typing import Dict, Sequence
+
 import requests
 import snappy
 
@@ -29,29 +29,30 @@
     Sample,
     TimeSeries,
 )
+from opentelemetry.sdk.metrics import Counter
+from opentelemetry.sdk.metrics import Histogram as ClientHistogram
+from opentelemetry.sdk.metrics import (
+    ObservableCounter,
+    ObservableGauge,
+    ObservableUpDownCounter,
+    UpDownCounter,
+)
 from opentelemetry.sdk.metrics.export import (
     AggregationTemporality,
     Gauge,
-    Sum,
     Histogram,
+    Metric,
     MetricExporter,
     MetricExportResult,
     MetricsData,
-    Metric,
-)
-from opentelemetry.sdk.metrics import (
-    Counter,
-    Histogram as ClientHistogram,
-    ObservableCounter,
-    ObservableGauge,
-    ObservableUpDownCounter,
-    UpDownCounter,
+    Sum,
 )
 
 logger = logging.getLogger(__name__)
 
-PROMETHEUS_NAME_REGEX = re.compile(r'[^\w:]')
-PROMETHEUS_LABEL_REGEX = re.compile(r'[^\w]')
+PROMETHEUS_NAME_REGEX = re.compile(r"[^\w:]")
+PROMETHEUS_LABEL_REGEX = re.compile(r"[^\w]")
+
 
 class PrometheusRemoteWriteMetricsExporter(MetricExporter):
     """
@@ -74,7 +75,7 @@ def __init__(
         timeout: int = 30,
         tls_config: Dict = None,
         proxies: Dict = None,
-        resources_as_labels : bool = True,
+        resources_as_labels: bool = True,
         preferred_temporality: Dict[type, AggregationTemporality] = None,
         preferred_aggregation: Dict = None,
     ):
@@ -95,9 +96,8 @@ def __init__(
             ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
             ObservableGauge: AggregationTemporality.CUMULATIVE,
         }
-        logger.error("Calling MetricExporter")
 
-        super().__init__(preferred_temporality,preferred_aggregation)
+        super().__init__(preferred_temporality, preferred_aggregation)
 
     @property
     def endpoint(self):
@@ -180,9 +180,9 @@ def headers(self, headers: Dict):
 
     def export(
         self,
-        metrics_data : MetricsData,
+        metrics_data: MetricsData,
         timeout_millis: float = 10_000,
-    ) ->MetricExportResult:
+    ) -> MetricExportResult:
         if not metrics_data:
             return MetricExportResult.SUCCESS
         timeseries = self._translate_data(metrics_data)
@@ -203,121 +203,129 @@ def _translate_data(self, data: MetricsData) -> Sequence[TimeSeries]:
             # OTLP Data model suggests combining some attrs into job/instance
             # Should we do that here?
             if self.resources_as_labels:
-                resource_labels = [ (n,str(v)) for n,v in resource.attributes.items() ]
+                resource_labels = [
+                    (n, str(v)) for n, v in resource.attributes.items()
+                ]
             else:
                 resource_labels = []
             # Scope name/version probably not too useful from a labeling perspective
             for scope_metrics in resource_metrics.scope_metrics:
                 for metric in scope_metrics.metrics:
-                    rw_timeseries.extend( self._parse_metric(metric,resource_labels) )
+                    rw_timeseries.extend(
+                        self._parse_metric(metric, resource_labels)
+                    )
         return rw_timeseries
 
-    def _parse_metric(self, metric: Metric, resource_labels: Sequence) -> Sequence[TimeSeries]:
+    def _parse_metric(
+        self, metric: Metric, resource_labels: Sequence
+    ) -> Sequence[TimeSeries]:
         """
         Parses the Metric & lower objects, then converts the output into
        OM TimeSeries. Returns a List of TimeSeries objects based on one Metric
         """
 
-
         # Create the metric name, will be a label later
         if metric.unit:
-            #Prom. naming guidelines add unit to the name
-            name =f"{metric.name}_{metric.unit}"
+            # Prom. naming guidelines add unit to the name
+            name = f"{metric.name}_{metric.unit}"
         else:
             name = metric.name
 
         # datapoints have attributes associated with them. these would be sent
         # to RW as different metrics: name & labels is a unique time series
         sample_sets = defaultdict(list)
-        if isinstance(metric.data,(Gauge,Sum)):
+        if isinstance(metric.data, (Gauge, Sum)):
             for dp in metric.data.data_points:
-                attrs,sample = self._parse_data_point(dp,name)
+                attrs, sample = self._parse_data_point(dp, name)
                 sample_sets[attrs].append(sample)
-        elif isinstance(metric.data,Histogram):
+        elif isinstance(metric.data, Histogram):
            for dp in metric.data.data_points:
-                dp_result = self._parse_histogram_data_point(dp,name)
-                for attrs,sample in dp_result:
+                dp_result = self._parse_histogram_data_point(dp, name)
+                for attrs, sample in dp_result:
                     sample_sets[attrs].append(sample)
         else:
-            logger.warn("Unsupported Metric Type: %s",type(metric.data))
+            logger.warn("Unsupported Metric Type: %s", type(metric.data))
             return []
 
         timeseries = []
         for labels, samples in sample_sets.items():
             ts = TimeSeries()
-            for label_name,label_value in chain(resource_labels,labels):
+            for label_name, label_value in chain(resource_labels, labels):
                 # Previous implementation did not str() the names...
-                ts.labels.append(self._label(label_name,str(label_value)))
-            for value,timestamp in samples:
-                ts.samples.append(self._sample(value,timestamp))
+                ts.labels.append(self._label(label_name, str(label_value)))
+            for value, timestamp in samples:
+                ts.samples.append(self._sample(value, timestamp))
             timeseries.append(ts)
         return timeseries
 
-    def _sample(self,value: int,timestamp :int) -> Sample:
+    def _sample(self, value: int, timestamp: int) -> Sample:
         sample = Sample()
         sample.value = value
         sample.timestamp = timestamp
         return sample
 
-    def _label(self,name:str,value:str) -> Label:
         label = Label()
-        label.name = PROMETHEUS_LABEL_REGEX.sub("_",name)
+    def _label(self, name: str, value: str) -> Label:
+        label = Label()
+        label.name = PROMETHEUS_LABEL_REGEX.sub("_", name)
         label.value = value
         return label
 
-    def _sanitize_name(self,name):
+    def _sanitize_name(self, name):
         # I Think Prometheus requires names to NOT start with a number this
         # would not catch that, but do cover the other cases. The naming rules
         # don't explicit say this, but the supplied regex implies it.
         # Got a little weird trying to do substitution with it, but can be
         # fixed if we allow numeric beginnings to metric names
-        return PROMETHEUS_NAME_REGEX.sub("_",name)
+        return PROMETHEUS_NAME_REGEX.sub("_", name)
 
     def _parse_histogram_data_point(self, data_point, name):
 
-        #if (len(data_point.explicit_bounds)+1) != len(data_point.bucket_counts):
+        # if (len(data_point.explicit_bounds)+1) != len(data_point.bucket_counts):
         #    raise ValueError("Number of buckets must be 1 more than the explicit bounds!")
 
         sample_attr_pairs = []
 
-        base_attrs = [(n,v) for n,v in data_point.attributes.items()]
+        base_attrs = [(n, v) for n, v in data_point.attributes.items()]
         timestamp = data_point.time_unix_nano // 1_000_000
 
-
-        def handle_bucket(value,bound=None,name_override=None):
+        def handle_bucket(value, bound=None, name_override=None):
            # Metric Level attributes + the bucket boundry attribute + name
             ts_attrs = base_attrs.copy()
-            ts_attrs.append(("__name__",self._sanitize_name(name_override or name)))
+            ts_attrs.append(
+                ("__name__", self._sanitize_name(name_override or name))
+            )
             if bound:
-                ts_attrs.append(("le",str(bound)))
+                ts_attrs.append(("le", str(bound)))
             # Value is count of values in each bucket
-            ts_sample = (value,timestamp)
+            ts_sample = (value, timestamp)
             return tuple(ts_attrs), ts_sample
 
-        for bound_pos,bound in enumerate(data_point.explicit_bounds):
+        for bound_pos, bound in enumerate(data_point.explicit_bounds):
             sample_attr_pairs.append(
-                handle_bucket(data_point.bucket_counts[bound_pos],bound)
+                handle_bucket(data_point.bucket_counts[bound_pos], bound)
             )
 
         # Add the last label for implicit +inf bucket
         sample_attr_pairs.append(
-            handle_bucket(data_point.bucket_counts[-1],bound="+Inf")
+            handle_bucket(data_point.bucket_counts[-1], bound="+Inf")
         )
 
-        #Lastly, add series for count & sum
+        # Lastly, add series for count & sum
         sample_attr_pairs.append(
-            handle_bucket(data_point.sum,name_override=f"{name}_sum")
+            handle_bucket(data_point.sum, name_override=f"{name}_sum")
         )
         sample_attr_pairs.append(
-            handle_bucket(data_point.count,name_override=f"{name}_count")
+            handle_bucket(data_point.count, name_override=f"{name}_count")
         )
         return sample_attr_pairs
 
-    def _parse_data_point(self, data_point,name=None):
+    def _parse_data_point(self, data_point, name=None):
 
-        attrs = tuple(data_point.attributes.items()) + (("__name__",self._sanitize_name(name)),)
-        sample = (data_point.value,(data_point.time_unix_nano // 1_000_000))
-        return attrs,sample
+        attrs = tuple(data_point.attributes.items()) + (
+            ("__name__", self._sanitize_name(name)),
+        )
+        sample = (data_point.value, (data_point.time_unix_nano // 1_000_000))
+        return attrs, sample
 
     # pylint: disable=no-member,no-self-use
     def _build_message(self, timeseries: Sequence[TimeSeries]) -> bytes:
@@ -383,4 +391,3 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool:
 
     def shutdown(self) -> None:
         pass
-
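
Aside: the two module-level regexes in this diff drive all sanitization. Metric names may keep word characters and colons, label names only word characters, and everything else is collapsed to "_". A quick standalone check using the same patterns (the sample strings are made up for illustration):

import re

# Same patterns the exporter defines: names may keep ":", labels may not.
PROMETHEUS_NAME_REGEX = re.compile(r"[^\w:]")
PROMETHEUS_LABEL_REGEX = re.compile(r"[^\w]")

assert PROMETHEUS_NAME_REGEX.sub("_", "http.server:latency-ms") == "http_server:latency_ms"
assert PROMETHEUS_LABEL_REGEX.sub("_", "k8s.pod.name") == "k8s_pod_name"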
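
Aside: _parse_histogram_data_point fans a single histogram data point out into the conventional Prometheus series, one le-labelled bucket per explicit bound, an implicit +Inf bucket, plus _sum and _count. A simplified, dependency-free sketch of that expansion (FakePoint and its values are invented for illustration; the real code operates on OTLP data points and TimeSeries protos):

from dataclasses import dataclass, field

@dataclass
class FakePoint:  # stand-in for an OTLP histogram data point
    explicit_bounds: list = field(default_factory=lambda: [0.5, 1.0])
    bucket_counts: list = field(default_factory=lambda: [3, 5, 2])
    sum: float = 7.5
    count: int = 10

def expand(dp, name):
    # One (labels, value) pair per explicit bound, then +Inf, then _sum/_count.
    series = [
        ({"__name__": name, "le": str(bound)}, dp.bucket_counts[i])
        for i, bound in enumerate(dp.explicit_bounds)
    ]
    series.append(({"__name__": name, "le": "+Inf"}, dp.bucket_counts[-1]))
    series.append(({"__name__": f"{name}_sum"}, dp.sum))
    series.append(({"__name__": f"{name}_count"}, dp.count))
    return series

for labels, value in expand(FakePoint(), "request_latency_ms"):
    print(labels, value)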
