Skip to content

Commit 82979a5

Browse files
committed
Add some tests & test infra.
1 parent 30ecfab commit 82979a5

File tree

3 files changed

+210
-62
lines changed

3 files changed

+210
-62
lines changed

exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py

+86-55
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@
1616
import re
1717
from typing import Dict, Sequence
1818

19+
from collections import defaultdict
20+
from itertools import chain
1921
import requests
2022
import snappy
2123

@@ -30,10 +32,12 @@
3032
from opentelemetry.sdk.metrics.export import (
3133
MetricExporter,
3234
MetricExportResult,
33-
AggregationTemporality,
3435
Gauge,
3536
Sum,
3637
Histogram,
38+
MetricExportResult,
39+
MetricsData,
40+
Metric,
3741
)
3842
#from opentelemetry.sdk.metrics.export.aggregate import (
3943
# HistogramAggregator,
@@ -162,8 +166,8 @@ def headers(self, headers: Dict):
162166
self._headers = headers
163167

164168
def export(
165-
self, export_records: Sequence[ExportRecord]
166-
) -> MetricsExportResult:
169+
self, export_records
170+
) ->MetricExportResult:
167171
if not export_records:
168172
return MetricsExportResult.SUCCESS
169173
timeseries = self._convert_to_timeseries(export_records)
@@ -181,9 +185,82 @@ def shutdown(self) -> None:
181185

182186
def _translate_data(self, data: MetricsData):
    """Convert an OTLP MetricsData tree into a flat list of remote-write TimeSeries.

    Walks resource -> scope -> metric, attaching each resource's attributes
    as Prometheus labels on every series produced from its metrics.

    Args:
        data: the MetricsData batch handed to the exporter.

    Returns:
        A list of TimeSeries protos, one per unique (metric name, label set).
    """
    rw_timeseries = []
    for resource_metrics in data.resource_metrics:
        resource = resource_metrics.resource
        # OTLP Data model suggests combining some attrs into job/instance
        # Should we do that here?
        resource_labels = self._get_resource_labels(resource.attributes)
        # Scope name/version probably not too useful from a labeling perspective
        for scope_metrics in resource_metrics.scope_metrics:
            for metric in scope_metrics.metrics:
                rw_timeseries.extend(self._parse_metric(metric, resource_labels))
    # BUG FIX: the accumulated series were previously built and then dropped —
    # the function ended without returning them.
    return rw_timeseries
198+
199+
def _get_resource_labels(self,attrs):
200+
""" Converts Resource Attributes to Prometheus Labels based on
201+
OTLP Metric Data Model's recommendations on Resource Attributes
202+
"""
203+
return [ (n,str(v)) for n,v in resource.attributes.items() ]
204+
205+
def _parse_metric(self, metric: Metric, resource_labels: Sequence) -> Sequence[TimeSeries]:
    """
    Parses the Metric & lower objects, then converts the output into
    OM TimeSeries. Returns a list of TimeSeries objects based on one Metric.

    Args:
        metric: a single OTLP Metric (Gauge or Sum supported; Histogram TODO).
        resource_labels: (name, value) pairs derived from the Resource.

    Returns:
        List of TimeSeries; empty list for unsupported metric data types.
    """
    # Datapoints have attributes associated with them. These would be sent
    # to RW as different metrics: name & labels is a unique time series.
    sample_sets = defaultdict(list)
    if isinstance(metric.data, (Gauge, Sum)):
        for dp in metric.data.data_points:
            attrs, sample = self._parse_data_point(dp)
            sample_sets[attrs].append(sample)
    elif isinstance(metric.data, Histogram):
        # BUG FIX: was `isinstance(metric.data, (HistogramType))`, but no name
        # `HistogramType` is imported by this module — the SDK class is
        # imported as `Histogram`, so that branch raised NameError.
        raise NotImplementedError("Coming sooN!")
    else:
        # BUG FIX: `logger.warn` is deprecated; use `logger.warning`.
        logger.warning("Unsupported Metric Type: %s", type(metric.data))
        return []

    # Create the metric name, will be a label later.
    if metric.unit:
        # Prometheus naming guidelines add the unit to the name.
        name = f"{metric.name}_{metric.unit}"
    else:
        name = metric.name

    timeseries = []
    for labels, samples in sample_sets.items():
        ts = TimeSeries()
        ts.labels.append(self._label("__name__", name))
        for label_name, label_value in chain(resource_labels, labels):
            # Previous implementation did not str() the names...
            ts.labels.append(self._label(label_name, str(label_value)))
        for value, timestamp in samples:
            ts.samples.append(self._sample(value, timestamp))
        timeseries.append(ts)
    return timeseries
241+
242+
def _sample(self, value, timestamp: int):
    """Build a remote-write Sample proto from a value and a millisecond timestamp."""
    result = Sample()
    result.value = value
    result.timestamp = timestamp
    return result
247+
248+
def _label(self, name: str, value: str):
    """Build a remote-write Label proto from a name/value pair."""
    result = Label()
    result.name = name
    result.value = value
    return result
253+
254+
def _parse_data_point(self, data_point):
255+
256+
attrs = tuple(data_point.attributes.items())
257+
#TODO: Optimize? create Sample here
258+
# remote write time is in milliseconds
259+
sample = (data_point.value,(data_point.time_unix_nano // 1_000_000))
260+
return attrs,sample
261+
185262
def _convert_to_timeseries(
186-
self, export_records: Sequence[ExportRecord]
263+
self, export_records
187264
) -> Sequence[TimeSeries]:
188265
timeseries = []
189266
for export_record in export_records:
@@ -199,7 +276,7 @@ def _convert_to_timeseries(
199276
return timeseries
200277

201278
def _convert_from_sum(
202-
self, sum_record: ExportRecord
279+
self, sum_record
203280
) -> Sequence[TimeSeries]:
204281
return [
205282
self._create_timeseries(
@@ -211,22 +288,9 @@ def _convert_from_sum(
211288

212289
def _convert_from_gauge(self, gauge_record):
213290
raise NotImplementedError("Do this")
214-
def _convert_from_min_max_sum_count(
215-
self, min_max_sum_count_record: ExportRecord
216-
) -> Sequence[TimeSeries]:
217-
timeseries = []
218-
for agg_type in ["min", "max", "sum", "count"]:
219-
name = min_max_sum_count_record.instrument.name + "_" + agg_type
220-
value = getattr(
221-
min_max_sum_count_record.aggregator.checkpoint, agg_type
222-
)
223-
timeseries.append(
224-
self._create_timeseries(min_max_sum_count_record, name, value)
225-
)
226-
return timeseries
227291

228292
def _convert_from_histogram(
229-
self, histogram_record: ExportRecord
293+
self, histogram_record
230294
) -> Sequence[TimeSeries]:
231295
timeseries = []
232296
for bound in histogram_record.aggregator.checkpoint.keys():
@@ -242,43 +306,10 @@ def _convert_from_histogram(
242306
)
243307
return timeseries
244308

245-
def _convert_from_last_value(
246-
self, last_value_record: ExportRecord
247-
) -> Sequence[TimeSeries]:
248-
return [
249-
self._create_timeseries(
250-
last_value_record,
251-
last_value_record.instrument.name + "_last",
252-
last_value_record.aggregator.checkpoint,
253-
)
254-
]
255-
256-
def _convert_from_value_observer(
257-
self, value_observer_record: ExportRecord
258-
) -> Sequence[TimeSeries]:
259-
timeseries = []
260-
for agg_type in ["min", "max", "sum", "count", "last"]:
261-
timeseries.append(
262-
self._create_timeseries(
263-
value_observer_record,
264-
value_observer_record.instrument.name + "_" + agg_type,
265-
getattr(
266-
value_observer_record.aggregator.checkpoint, agg_type
267-
),
268-
)
269-
)
270-
return timeseries
271-
272-
# TODO: Implement convert from quantile once supported by SDK for Prometheus Summaries
273-
def _convert_from_quantile(
274-
self, summary_record: ExportRecord
275-
) -> Sequence[TimeSeries]:
276-
raise NotImplementedError()
277-
278309
# pylint: disable=no-member,no-self-use
279310
def _create_timeseries(
280311
self,
281-
export_record: ExportRecord,
312+
export_record,
282313
name: str,
283314
value: float,
284315
extra_label: (str, str) = None,
@@ -344,7 +375,7 @@ def _build_headers(self) -> Dict:
344375

345376
def _send_message(
346377
self, message: bytes, headers: Dict
347-
) -> MetricsExportResult:
378+
) -> MetricExportResult:
348379
auth = None
349380
if self.basic_auth:
350381
auth = (self.basic_auth["username"], self.basic_auth["password"])
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
2+
3+
import random
4+
import pytest
5+
6+
import opentelemetry.test.metrictestutil as metric_util#import _generate_gauge, _generate_sum
7+
8+
from opentelemetry.sdk.metrics.export import (
9+
AggregationTemporality,
10+
Histogram,
11+
HistogramDataPoint,
12+
Sum,
13+
Gauge,
14+
MetricExportResult,
15+
MetricsData,
16+
ResourceMetrics,
17+
ScopeMetrics,
18+
Metric,
19+
)
20+
21+
from opentelemetry.exporter.prometheus_remote_write import (
22+
PrometheusRemoteWriteMetricsExporter,
23+
)
24+
@pytest.fixture
def prom_rw():
    """Exporter instance pointed at a dummy remote-write endpoint."""
    endpoint = "http://victoria:8428/api/v1/write"
    return PrometheusRemoteWriteMetricsExporter(endpoint)
27+
28+
29+
30+
@pytest.fixture
def generate_metrics_data(data):
    # Placeholder fixture — body is intentionally empty for now.
    # NOTE(review): `data` is declared as a fixture parameter, but no fixture
    # named `data` is visible in this file; requesting this fixture will fail
    # at collection time until one exists — TODO confirm intent.
    pass
33+
34+
35+
36+
@pytest.fixture
def metric_histogram():
    """A cumulative Histogram Metric with one data point and randomised count/sum."""
    data_point = HistogramDataPoint(
        attributes={"foo": "bar", "baz": 42},
        start_time_unix_nano=1641946016139533244,
        time_unix_nano=1641946016139533244,
        count=random.randint(1, 10),
        sum=random.randint(42, 420),
        bucket_counts=[1, 4],
        explicit_bounds=[10.0, 20.0],
        min=8,
        max=18,
    )
    histogram = Histogram(
        [data_point],
        AggregationTemporality.CUMULATIVE,
    )
    return Metric(
        "test_histogram",
        "foo",
        "tu",
        data=histogram,
    )
59+
60+
@pytest.fixture
def metric(request):
    """Parametrized fixture producing either a Gauge or a Sum Metric.

    Use with ``@pytest.mark.parametrize(..., indirect=["metric"])`` and a
    param of ``"gauge"`` or ``"sum"``.
    """
    if request.param == "gauge":
        return metric_util._generate_gauge("test_gauge", random.randint(0, 100))
    if request.param == "sum":
        return metric_util._generate_sum("test_sum", random.randint(0, 9_999_999_999))
    # Robustness fix: previously fell through and returned None for an unknown
    # param, producing confusing downstream failures; fail loudly instead.
    raise ValueError(f"Unsupported metric fixture param: {request.param!r}")
66+

exporter/opentelemetry-exporter-prometheus-remote-write/tests/test_prometheus_remote_write_exporter.py

+58-7
Original file line numberDiff line numberDiff line change
@@ -23,17 +23,68 @@
2323
TimeSeries,
2424
)
2525
from opentelemetry.sdk.metrics import Counter
26-
from opentelemetry.sdk.metrics.export import ExportRecord, MetricsExportResult
27-
from opentelemetry.sdk.metrics.export.aggregate import (
28-
HistogramAggregator,
29-
LastValueAggregator,
30-
MinMaxSumCountAggregator,
31-
SumAggregator,
32-
ValueObserverAggregator,
26+
#from opentelemetry.sdk.metrics.export import ExportRecord, MetricExportResult
27+
#from opentelemetry.sdk.metrics.export.aggregate import (
28+
# HistogramAggregator,
29+
# LastValueAggregator,
30+
# MinMaxSumCountAggregator,
31+
# SumAggregator,
32+
# ValueObserverAggregator,
33+
#)
34+
35+
from opentelemetry.sdk.metrics.export import (
36+
NumberDataPoint,
3337
)
3438
from opentelemetry.sdk.resources import Resource
3539
from opentelemetry.sdk.util import get_dict_as_key
3640

41+
import pytest
42+
43+
def test_parse_data_point(prom_rw):
    """_parse_data_point splits a NumberDataPoint into (labels, sample)."""
    attrs = {"Foo": "Bar", "Baz": 42}
    timestamp = 1641946016139533244
    value = 242.42
    dp = NumberDataPoint(attrs, 0, timestamp, value)

    labels, sample = prom_rw._parse_data_point(dp)

    # Attributes become a hashable tuple of pairs, insertion order preserved.
    assert labels == (("Foo", "Bar"), ("Baz", 42))
    # Remote-write timestamps are in milliseconds.
    assert sample == (value, timestamp // 1_000_000)
57+
58+
@pytest.mark.parametrize("metric", [
    "gauge",
    "sum",
], indirect=["metric"])
def test_parse_metric(metric, prom_rw):
    """A Gauge/Sum Metric with one data point yields exactly one TimeSeries
    carrying the resource labels, the data-point attributes, and __name__."""
    # We have 1 data point & 5 labels total
    attributes = {
        "service": "foo",
        "id": 42,
    }

    series = prom_rw._parse_metric(metric, tuple(attributes.items()))
    assert len(series) == 1

    # Build out the expected attributes and check they all made it as labels
    proto_out = series[0]
    number_data_point = metric.data.data_points[0]
    attributes.update(number_data_point.attributes)
    attributes["__name__"] = metric.name + f"_{metric.unit}"

    for label in proto_out.labels:
        assert label.value == str(attributes[label.name])

    # Ensure we have one sample with the correct time & value.
    # BUG FIX: was `len(series.samples)` — `series` is a plain list and has
    # no `.samples` attribute; the samples live on the TimeSeries proto.
    assert len(proto_out.samples) == 1
    sample = proto_out.samples[0]
    assert sample.timestamp == (number_data_point.time_unix_nano // 1_000_000)
    assert sample.value == number_data_point.value
86+
87+
3788

3889
class TestValidation(unittest.TestCase):
3990
# Test cases to ensure exporter parameter validation works as intended

0 commit comments

Comments
 (0)