
Commit 1ce0712

committed Sep 27, 2022
Update test cases
1 parent 82979a5 commit 1ce0712

File tree: 4 files changed (+240 −382 lines)

exporter/opentelemetry-exporter-prometheus-remote-write/setup.cfg (+1 −3)
```diff
@@ -34,13 +34,11 @@ classifiers =
     Programming Language :: Python :: 3.8
 
 [options]
-python_requires = >=3.5
+python_requires = >=3.8
 package_dir=
     =src
 packages=find_namespace:
 install_requires =
-    snappy >= 2.8
-    #protobuf >= 3.13.0
     protobuf == 3.20.0
     requests == 2.25.0
     opentelemetry-api == 1.12.0rc2
```
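A quick, stdlib-only way to confirm what these pins resolve to in a local environment (illustrative, not part of the commit; the distribution names are taken from `install_requires` above):

```python
from importlib.metadata import version  # stdlib on Python 3.8+, matching python_requires

# Distribution names as pinned in install_requires above.
for dist in ("protobuf", "requests", "opentelemetry-api"):
    print(dist, version(dist))
```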

exporter/opentelemetry-exporter-prometheus-remote-write/src/opentelemetry/exporter/prometheus_remote_write/__init__.py (+82 −56)
```diff
@@ -71,19 +71,15 @@ def __init__(
         timeout: int = 30,
         tls_config: Dict = None,
         proxies: Dict = None,
+        resources_as_labels : bool = True,
     ):
         self.endpoint = endpoint
         self.basic_auth = basic_auth
         self.headers = headers
         self.timeout = timeout
         self.tls_config = tls_config
         self.proxies = proxies
-
-        self.converter_map = {
-            Sum: self._convert_from_sum,
-            Histogram: self._convert_from_histogram,
-            Gauge: self._convert_from_gauge,
-        }
+        self.resources_as_labels = resources_as_labels
 
 
     @property
```
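The per-type `converter_map` is gone; in its place, a single `resources_as_labels` switch decides whether Resource attributes are copied onto every exported series. A minimal construction sketch (class and parameter names are from this diff; the endpoint URL is illustrative):

```python
from opentelemetry.exporter.prometheus_remote_write import (
    PrometheusRemoteWriteMetricsExporter,
)

# resources_as_labels=True (the default) turns every Resource attribute
# into a Prometheus label on each exported time series.
exporter = PrometheusRemoteWriteMetricsExporter(
    endpoint="http://localhost:8428/api/v1/write",  # illustrative endpoint
    resources_as_labels=True,
)
```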
```diff
@@ -166,71 +162,73 @@ def headers(self, headers: Dict):
         self._headers = headers
 
     def export(
-        self, export_records
+        self,metrics_data : MetricsData
     ) ->MetricExportResult:
-        if not export_records:
-            return MetricsExportResult.SUCCESS
-        timeseries = self._convert_to_timeseries(export_records)
+        if not metrics_data:
+            return MetricExportResult.SUCCESS
+        timeseries = self._translate_data(metrics_data)
         if not timeseries:
             logger.error(
                 "All records contain unsupported aggregators, export aborted"
             )
-            return MetricsExportResult.FAILURE
+            return MetricExportResult.FAILURE
         message = self._build_message(timeseries)
         headers = self._build_headers()
         return self._send_message(message, headers)
 
     def shutdown(self) -> None:
         pass
 
-    def _translate_data(self, data: MetricsData):
+    def _translate_data(self, data: MetricsData) -> Sequence[TimeSeries]:
         rw_timeseries = []
 
         for resource_metrics in data.resource_metrics:
             resource = resource_metrics.resource
             # OTLP Data model suggests combining some attrs into job/instance
             # Should we do that here?
-            resource_labels = self._get_resource_labels(resource.attributes)
+            if self.resources_as_labels:
+                resource_labels = [ (n,str(v)) for n,v in resource.attributes.items() ]
+            else:
+                resource_labels = []
             # Scope name/version probably not too useful from a labeling perspective
             for scope_metrics in resource_metrics.scope_metrics:
                 for metric in scope_metrics.metrics:
                     rw_timeseries.extend( self._parse_metric(metric,resource_labels) )
-
-    def _get_resource_labels(self,attrs):
-        """ Converts Resource Attributes to Prometheus Labels based on
-        OTLP Metric Data Model's recommendations on Resource Attributes
-        """
-        return [ (n,str(v)) for n,v in resource.attributes.items() ]
+        return rw_timeseries
 
     def _parse_metric(self, metric: Metric, resource_labels: Sequence) -> Sequence[TimeSeries]:
         """
         Parses the Metric & lower objects, then converts the output into
         OM TimeSeries. Returns a List of TimeSeries objects based on one Metric
         """
+
+        # Create the metric name, will be a label later
+        if metric.unit:
+            #Prom. naming guidelines add unit to the name
+            name =f"{metric.name}_{metric.unit}"
+        else:
+            name = metric.name
+
         # datapoints have attributes associated with them. these would be sent
         # to RW as different metrics: name & labels is a unique time series
         sample_sets = defaultdict(list)
         if isinstance(metric.data,(Gauge,Sum)):
             for dp in metric.data.data_points:
-                attrs,sample = self._parse_data_point(dp)
+                attrs,sample = self._parse_data_point(dp,name)
                 sample_sets[attrs].append(sample)
-        elif isinstance(metric.data,(HistogramType)):
-            raise NotImplementedError("Coming sooN!")
+        elif isinstance(metric.data,Histogram):
+            for dp in metric.data.data_points:
+                dp_result = self._parse_histogram_data_point(dp,name)
+                for attrs,sample in dp_result:
+                    sample_sets[attrs].append(sample)
         else:
             logger.warn("Unsupported Metric Type: %s",type(metric.data))
             return []
 
-        # Create the metric name, will be a label later
-        if metric.unit:
-            #Prom. naming guidelines add unit to the name
-            name =f"{metric.name}_{metric.unit}"
-        else:
-            name = metric.name
-
         timeseries = []
         for labels, samples in sample_sets.items():
             ts = TimeSeries()
-            ts.labels.append(self._label("__name__",name))
             for label_name,label_value in chain(resource_labels,labels):
                 # Previous implementation did not str() the names...
                 ts.labels.append(self._label(label_name,str(label_value)))
```
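To make the grouping concrete: `_parse_data_point` returns an `(attributes, sample)` pair, and `sample_sets` buckets samples by their full label set, since name plus labels identifies a unique remote-write series. A rough sketch of the intermediate shape (values invented for illustration):

```python
from collections import defaultdict

# Hypothetical intermediate state for a gauge named "test_gauge_tu" with one
# data point; sample timestamps are epoch milliseconds (time_unix_nano // 1_000_000).
sample_sets = defaultdict(list)
attrs = (("foo", "bar"), ("__name__", "test_gauge_tu"))
sample_sets[attrs].append((42, 1641946016139))

# Each key becomes one TimeSeries; each value list becomes its samples.
for labels, samples in sample_sets.items():
    print(labels, samples)
```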
```diff
@@ -239,23 +237,61 @@ def _parse_metric(self, metric: Metric, resource_labels: Sequence) -> Sequence[TimeSeries]:
             timeseries.append(ts)
         return timeseries
 
-    def _sample(self,value,timestamp :int):
+    def _sample(self,value: int,timestamp :int) -> Sample:
         sample = Sample()
         sample.value = value
         sample.timestamp = timestamp
         return sample
 
-    def _label(self,name:str,value:str):
+    def _label(self,name:str,value:str) -> Label:
         label = Label()
         label.name = name
         label.value = value
         return label
 
-    def _parse_data_point(self, data_point):
+    def _parse_histogram_data_point(self, data_point, name):
+
+        #if (len(data_point.explicit_bounds)+1) != len(data_point.bucket_counts):
+        #    raise ValueError("Number of buckets must be 1 more than the explicit bounds!")
+
+        sample_attr_pairs = []
+
+        base_attrs = [(n,v) for n,v in data_point.attributes.items()]
+        timestamp = data_point.time_unix_nano // 1_000_000
+
+        def handle_bucket(value,bound=None,name_override=None):
+            # Metric Level attributes + the bucket boundry attribute + name
+            ts_attrs = base_attrs.copy()
+            ts_attrs.append(("__name__",name_override or name))
+            if bound:
+                ts_attrs.append(("le",str(bound)))
+            # Value is count of values in each bucket
+            ts_sample = (value,timestamp)
+            return tuple(ts_attrs), ts_sample
 
-        attrs = tuple(data_point.attributes.items())
-        #TODO: Optimize? create Sample here
-        # remote write time is in milliseconds
+        for bound_pos,bound in enumerate(data_point.explicit_bounds):
+            sample_attr_pairs.append(
+                handle_bucket(data_point.bucket_counts[bound_pos],bound)
+            )
+
+        # Add the last label for implicit +inf bucket
+        sample_attr_pairs.append(
+            handle_bucket(data_point.bucket_counts[-1],bound="+Inf")
+        )
+
+        #Lastly, add series for count & sum
+        sample_attr_pairs.append(
+            handle_bucket(data_point.sum,name_override=f"{name}_sum")
+        )
+        sample_attr_pairs.append(
+            handle_bucket(data_point.count,name_override=f"{name}_count")
+        )
+        return sample_attr_pairs
+
+    def _parse_data_point(self, data_point,name=None):
+
+        attrs = tuple(data_point.attributes.items()) + (("__name__",name),)
         sample = (data_point.value,(data_point.time_unix_nano // 1_000_000))
         return attrs,sample
```
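This follows the usual Prometheus naming convention for histograms: each explicit bound becomes an `le`-labeled bucket series, an implicit `le="+Inf"` series is appended from the last bucket count, and the sum and count become `<name>_sum` and `<name>_count`. Worked through with the numbers used in this commit's tests (bounds `[10.0, 20.0]`, bucket counts `[1, 4, 4]`, sum 180, count 9; the metric name `latency` and the single base attribute are hypothetical), `_parse_histogram_data_point` yields pairs shaped like:

```python
# (label tuple, (value, timestamp-in-ms)) pairs, one per emitted series
pairs = [
    ((("foo", "bar"), ("__name__", "latency"), ("le", "10.0")), (1, 1641946016139)),
    ((("foo", "bar"), ("__name__", "latency"), ("le", "20.0")), (4, 1641946016139)),
    ((("foo", "bar"), ("__name__", "latency"), ("le", "+Inf")), (4, 1641946016139)),
    ((("foo", "bar"), ("__name__", "latency_sum")), (180, 1641946016139)),
    ((("foo", "bar"), ("__name__", "latency_count")), (9, 1641946016139)),
]
```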

```diff
@@ -275,27 +311,17 @@ def _convert_to_timeseries(
         )
         return timeseries
 
-    def _convert_from_sum(
-        self, sum_record
-    ) -> Sequence[TimeSeries]:
-        return [
-            self._create_timeseries(
-                sum_record,
-                sum_record.instrument.name + "_sum",
-                sum_record.aggregator.checkpoint,
-            )
-        ]
-
-    def _convert_from_gauge(self, gauge_record):
-        raise NotImplementedError("Do this")
-
     def _convert_from_histogram(
-        self, histogram_record
+        self, histogram: Histogram,
     ) -> Sequence[TimeSeries]:
-        timeseries = []
-        for bound in histogram_record.aggregator.checkpoint.keys():
+        sample_sets = defaultdict(list)
+
+        base_attrs = [self._label(n,v) for n,v in histogram.attributes]
+        for bound in histogram.explicit_bounds:
             bound_str = "+Inf" if bound == float("inf") else str(bound)
-            value = histogram_record.aggregator.checkpoint[bound]
+            # General attributes apply
+            ts_attrs = base_attrs.copy.append(self._label("le",str(bound)))
+            sample_sets[attrs].append(sample)
             timeseries.append(
                 self._create_timeseries(
                     histogram_record,
```
```diff
@@ -411,5 +437,5 @@ def _send_message(
             response.raise_for_status()
         except requests.exceptions.RequestException as err:
             logger.error("Export POST request failed with reason: %s", err)
-            return MetricsExportResult.FAILURE
-        return MetricsExportResult.SUCCESS
+            return MetricExportResult.FAILURE
+        return MetricExportResult.SUCCESS
```
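Taken together, `export()` now walks MetricsData through translation, serialization, and delivery. A rough restatement of the pipeline using the method names from this diff (the exporter does this internally; this is not additional API):

```python
def export_pipeline(exporter, metrics_data):
    # OTLP MetricsData -> remote-write TimeSeries
    timeseries = exporter._translate_data(metrics_data)
    # TimeSeries -> snappy-compressed protobuf message
    message = exporter._build_message(timeseries)
    # Content-Encoding / Content-Type / remote-write version headers
    headers = exporter._build_headers()
    # HTTP POST to the configured endpoint
    return exporter._send_message(message, headers)
```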

exporter/opentelemetry-exporter-prometheus-remote-write/tests/conftest.py (+17 −19)
```diff
@@ -26,41 +26,39 @@ def prom_rw():
     return PrometheusRemoteWriteMetricsExporter("http://victoria:8428/api/v1/write")
 
 
-
 @pytest.fixture
-def generate_metrics_data(data):
-    pass
-
-
+def metric(request):
+    if hasattr(request,"param"):
+        type_ = request.param
+    else:
+        type_ = random.choice(["gauge","sum"])
+    if type_ == "gauge":
+        return metric_util._generate_gauge("test_gauge",random.randint(0,100))
+    elif type_ == "sum":
+        return metric_util._generate_sum("test_sum",random.randint(0,9_999_999_999))
+    elif type_ == "histogram":
+        return _generate_histogram("test_histogram")
 
-@pytest.fixture
-def metric_histogram():
+def _generate_histogram(name):
     dp = HistogramDataPoint(
         attributes={"foo": "bar", "baz": 42},
         start_time_unix_nano=1641946016139533244,
         time_unix_nano=1641946016139533244,
-        count=random.randint(1,10),
-        sum=random.randint(42,420),
+        count=5,
+        sum=420,
         bucket_counts=[1, 4],
-        explicit_bounds=[10.0, 20.0],
+        explicit_bounds=[10.0],
         min=8,
-        max=18,
+        max=80,
     )
     data = Histogram(
         [dp],
         AggregationTemporality.CUMULATIVE,
     )
     return Metric(
-        "test_histogram",
+        name,
         "foo",
         "tu",
         data=data,
     )
 
-@pytest.fixture
-def metric(request):
-    if request.param == "gauge":
-        return metric_util._generate_gauge("test_gauge",random.randint(0,100))
-    elif request.param == "sum":
-        return metric_util._generate_sum("test_sum",random.randint(0,9_999_999_999))
```
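The reworked `metric` fixture supports pytest's indirect parametrization: a test can pin the metric type, or omit the parameter and get a random gauge/sum. A minimal usage sketch (the test body is illustrative; the `metric` and `prom_rw` fixtures are the ones defined above):

```python
import pytest

@pytest.mark.parametrize("metric", ["gauge", "sum", "histogram"], indirect=["metric"])
def test_metric_becomes_timeseries(metric, prom_rw):
    # `metric` is built by the fixture from the parametrized type string;
    # an empty resource-label tuple keeps the example self-contained.
    series = prom_rw._parse_metric(metric, ())
    assert series
```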

exporter/opentelemetry-exporter-prometheus-remote-write/tests/test_prometheus_remote_write_exporter.py (+140 −304)
```diff
@@ -14,6 +14,7 @@
 
 import unittest
 from unittest.mock import patch
+import snappy
 
 from opentelemetry.exporter.prometheus_remote_write import (
     PrometheusRemoteWriteMetricsExporter,
@@ -23,7 +24,7 @@
     TimeSeries,
 )
 from opentelemetry.sdk.metrics import Counter
-#from opentelemetry.sdk.metrics.export import ExportRecord, MetricExportResult
+#from opentelemetry.sdk.metrics.export import MetricExportResult
 #from opentelemetry.sdk.metrics.export.aggregate import (
 #    HistogramAggregator,
 #    LastValueAggregator,
@@ -34,7 +35,15 @@
 
 from opentelemetry.sdk.metrics.export import (
     NumberDataPoint,
+    HistogramDataPoint,
+    Histogram,
+    MetricsData,
+    ScopeMetrics,
+    ResourceMetrics,
+    MetricExportResult,
 )
+
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.util import get_dict_as_key
```
```diff
@@ -51,39 +60,89 @@ def test_parse_data_point(prom_rw):
         timestamp,
         value
     )
-    labels, sample = prom_rw._parse_data_point(dp)
-    assert labels == (("Foo", "Bar"),("Baz", 42))
+    name = "abc123_42"
+    labels, sample = prom_rw._parse_data_point(dp,name)
+
+    assert labels == (("Foo", "Bar"),("Baz", 42),("__name__",name))
     assert sample == (value,timestamp // 1_000_000)
 
+def test_parse_histogram_dp(prom_rw):
+    attrs = {"foo": "bar", "baz": 42}
+    timestamp = 1641946016139533244
+    bounds = [10.0, 20.0]
+    dp = HistogramDataPoint(
+        attributes=attrs,
+        start_time_unix_nano=1641946016139533244,
+        time_unix_nano=timestamp,
+        count=9,
+        sum=180,
+        bucket_counts=[1, 4, 4],
+        explicit_bounds=bounds,
+        min=8,
+        max=80,
+    )
+    name = "foo_histogram"
+    label_sample_pairs = prom_rw._parse_histogram_data_point(dp,name)
+    timestamp = timestamp // 1_000_000
+    bounds.append("+Inf")
+    for pos,bound in enumerate(bounds):
+        # We have to attributes, we kinda assume the bucket label is last...
+        assert ("le",str(bound)) == label_sample_pairs[pos][0][-1]
+        # Check and make sure we are putting the bucket counts in there
+        assert (dp.bucket_counts[pos],timestamp) == label_sample_pairs[pos][1]
+
+    # Last two are the sum & total count
+    pos +=1
+    assert ("__name__",f"{name}_sum") in label_sample_pairs[pos][0]
+    assert (dp.sum,timestamp) == label_sample_pairs[pos][1]
+
+    pos +=1
+    assert ("__name__",f"{name}_count") in label_sample_pairs[pos][0]
+    assert (dp.count,timestamp) == label_sample_pairs[pos][1]
+
 @pytest.mark.parametrize("metric",[
     "gauge",
     "sum",
+    "histogram",
 ],indirect=["metric"])
 def test_parse_metric(metric,prom_rw):
-    # We have 1 data point & 5 labels total
+    """
+    Ensures output from parse_metrics are TimeSeries with expected data/size
+    """
     attributes = {
         "service" : "foo",
-        "id" : 42,
+        "bool" : True,
     }
 
+    assert len(metric.data.data_points) == 1, "We can only support a single datapoint in tests"
     series = prom_rw._parse_metric(metric,tuple(attributes.items()))
-    assert len(series) == 1
-
-    #Build out the expected attributes and check they all made it as labels
-    proto_out = series[0]
-    number_data_point = metric.data.data_points[0]
-    attributes.update(number_data_point.attributes)
-    attributes["__name__"] = metric.name +f"_{metric.unit}"
-
-    for label in proto_out.labels:
-        assert label.value == str(attributes[label.name])
-
-    # Ensure we have one sample with the correct time & value
-    assert len(series.samples) == 1
-    sample = proto_out.samples[0]
-    assert sample.timestamp == (number_data_point.time_unix_nano // 1_000_000)
-    assert sample.value == number_data_point.value
-
+    timestamp = metric.data.data_points[0].time_unix_nano // 1_000_000
+    for single_series in series:
+        labels = str(single_series.labels)
+        # Its a bit easier to validate these stringified where we dont have to
+        # worry about ordering and protobuf TimeSeries object structure
+        # This doesn't guarantee the labels aren't mixed up, but our other
+        # test cases already do.
+        assert "__name__" in labels
+        assert metric.name in labels
+        combined_attrs = list(attributes.items()) + list(metric.data.data_points[0].attributes.items())
+        for name,value in combined_attrs:
+            assert name in labels
+            assert str(value) in labels
+        if isinstance(metric.data,Histogram):
+            values = [
+                metric.data.data_points[0].count,
+                metric.data.data_points[0].sum,
+                metric.data.data_points[0].bucket_counts[0],
+                metric.data.data_points[0].bucket_counts[1],
+            ]
+        else:
+            values = [
+                metric.data.data_points[0].value,
+            ]
+        for sample in single_series.samples:
+            assert sample.timestamp == timestamp
+            assert sample.value in values
 
 
 class TestValidation(unittest.TestCase):
```
```diff
@@ -179,287 +238,64 @@ def test_invalid_tls_config_key_only_param(self):
         )
 
 
-class TestConversion(unittest.TestCase):
-    # Initializes test data that is reused across tests
-    def setUp(self):
-        self.exporter = PrometheusRemoteWriteMetricsExporter(
-            endpoint="/prom/test_endpoint"
-        )
-
-    # Ensures conversion to timeseries function works with valid aggregation types
-    def test_valid_convert_to_timeseries(self):
-        test_records = [
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                SumAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                MinMaxSumCountAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                HistogramAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                LastValueAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                ValueObserverAggregator(),
-                Resource({}),
-            ),
-        ]
-        for record in test_records:
-            record.aggregator.update(5)
-            record.aggregator.take_checkpoint()
-        data = self.exporter._convert_to_timeseries(test_records)
-        self.assertIsInstance(data, list)
-        self.assertEqual(len(data), 13)
-        for timeseries in data:
-            self.assertIsInstance(timeseries, TimeSeries)
-
-    # Ensures conversion to timeseries fails for unsupported aggregation types
-    def test_invalid_convert_to_timeseries(self):
-        data = self.exporter._convert_to_timeseries(
-            [ExportRecord(None, None, None, Resource({}))]
-        )
-        self.assertIsInstance(data, list)
-        self.assertEqual(len(data), 0)
-
-    # Ensures sum aggregator is correctly converted to timeseries
-    def test_convert_from_sum(self):
-        sum_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            SumAggregator(),
-            Resource({}),
-        )
-        sum_record.aggregator.update(3)
-        sum_record.aggregator.update(2)
-        sum_record.aggregator.take_checkpoint()
-
-        expected_timeseries = self.exporter._create_timeseries(
-            sum_record, "testname_sum", 5.0
-        )
-        timeseries = self.exporter._convert_from_sum(sum_record)
-        self.assertEqual(timeseries[0], expected_timeseries)
-
-    # Ensures sum min_max_count aggregator is correctly converted to timeseries
-    def test_convert_from_min_max_sum_count(self):
-        min_max_sum_count_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            MinMaxSumCountAggregator(),
-            Resource({}),
-        )
-        min_max_sum_count_record.aggregator.update(5)
-        min_max_sum_count_record.aggregator.update(1)
-        min_max_sum_count_record.aggregator.take_checkpoint()
-
-        expected_min_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_min", 1.0
-        )
-        expected_max_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_max", 5.0
-        )
-        expected_sum_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_sum", 6.0
-        )
-        expected_count_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_count", 2.0
-        )
 
-        timeseries = self.exporter._convert_from_min_max_sum_count(
-            min_max_sum_count_record
-        )
-        self.assertEqual(timeseries[0], expected_min_timeseries)
-        self.assertEqual(timeseries[1], expected_max_timeseries)
-        self.assertEqual(timeseries[2], expected_sum_timeseries)
-        self.assertEqual(timeseries[3], expected_count_timeseries)
-
-    # Ensures histogram aggregator is correctly converted to timeseries
-    def test_convert_from_histogram(self):
-        histogram_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            HistogramAggregator(),
-            Resource({}),
-        )
-        histogram_record.aggregator.update(5)
-        histogram_record.aggregator.update(2)
-        histogram_record.aggregator.update(-1)
-        histogram_record.aggregator.take_checkpoint()
+# Ensures export is successful with valid export_records and config
+@patch("requests.post")
+def test_valid_export(mock_post,prom_rw,metric):
+    metric = metric
+    mock_post.return_value.configure_mock(**{"status_code": 200})
+    labels = get_dict_as_key({"environment": "testing"})
 
-        expected_le_0_timeseries = self.exporter._create_timeseries(
-            histogram_record, "testname_histogram", 1.0, ("le", "0")
-        )
-        expected_le_inf_timeseries = self.exporter._create_timeseries(
-            histogram_record, "testname_histogram", 2.0, ("le", "+Inf")
-        )
-        timeseries = self.exporter._convert_from_histogram(histogram_record)
-        self.assertEqual(timeseries[0], expected_le_0_timeseries)
-        self.assertEqual(timeseries[1], expected_le_inf_timeseries)
-
-    # Ensures last value aggregator is correctly converted to timeseries
-    def test_convert_from_last_value(self):
-        last_value_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            LastValueAggregator(),
-            Resource({}),
-        )
-        last_value_record.aggregator.update(1)
-        last_value_record.aggregator.update(5)
-        last_value_record.aggregator.take_checkpoint()
-
-        expected_timeseries = self.exporter._create_timeseries(
-            last_value_record, "testname_last", 5.0
-        )
-        timeseries = self.exporter._convert_from_last_value(last_value_record)
-        self.assertEqual(timeseries[0], expected_timeseries)
-
-    # Ensures value observer aggregator is correctly converted to timeseries
-    def test_convert_from_value_observer(self):
-        value_observer_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            ValueObserverAggregator(),
-            Resource({}),
-        )
-        value_observer_record.aggregator.update(5)
-        value_observer_record.aggregator.update(1)
-        value_observer_record.aggregator.update(2)
-        value_observer_record.aggregator.take_checkpoint()
-
-        expected_min_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_min", 1.0
-        )
-        expected_max_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_max", 5.0
-        )
-        expected_sum_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_sum", 8.0
-        )
-        expected_count_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_count", 3.0
-        )
-        expected_last_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_last", 2.0
-        )
-        timeseries = self.exporter._convert_from_value_observer(
-            value_observer_record
-        )
-        self.assertEqual(timeseries[0], expected_min_timeseries)
-        self.assertEqual(timeseries[1], expected_max_timeseries)
-        self.assertEqual(timeseries[2], expected_sum_timeseries)
-        self.assertEqual(timeseries[3], expected_count_timeseries)
-        self.assertEqual(timeseries[4], expected_last_timeseries)
-
-    # Ensures quantile aggregator is correctly converted to timeseries
-    # TODO: Add test_convert_from_quantile once method is implemented
-
-    # Ensures timeseries produced contains appropriate sample and labels
-    def test_create_timeseries(self):
-        def create_label(name, value):
-            label = Label()
-            label.name = name
-            label.value = value
-            return label
-
-        sum_aggregator = SumAggregator()
-        sum_aggregator.update(5)
-        sum_aggregator.take_checkpoint()
-        export_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            get_dict_as_key({"record_name": "record_value"}),
-            sum_aggregator,
-            Resource({"resource_name": "resource_value"}),
-        )
-
-        expected_timeseries = TimeSeries()
-        expected_timeseries.labels.append( # pylint:disable=E1101
-            create_label("__name__", "testname")
-        )
-        expected_timeseries.labels.append( # pylint:disable=E1101
-            create_label("resource_name", "resource_value")
-        )
-        expected_timeseries.labels.append( # pylint:disable=E1101
-            create_label("record_name", "record_value")
-        )
-
-        sample = expected_timeseries.samples.add() # pylint:disable=E1101
-        sample.timestamp = int(sum_aggregator.last_update_timestamp / 1000000)
-        sample.value = 5.0
-
-        timeseries = self.exporter._create_timeseries(
-            export_record, "testname", 5.0
-        )
-        self.assertEqual(timeseries, expected_timeseries)
-
-
-class TestExport(unittest.TestCase):
-    # Initializes test data that is reused across tests
-    def setUp(self):
-        self.exporter = PrometheusRemoteWriteMetricsExporter(
-            endpoint="/prom/test_endpoint"
-        )
-
-    # Ensures export is successful with valid export_records and config
-    @patch("requests.post")
-    def test_valid_export(self, mock_post):
-        mock_post.return_value.configure_mock(**{"status_code": 200})
-        test_metric = Counter("testname", "testdesc", "testunit", int, None)
-        labels = get_dict_as_key({"environment": "testing"})
-        record = ExportRecord(
-            test_metric, labels, SumAggregator(), Resource({})
-        )
-        result = self.exporter.export([record])
-        self.assertIs(result, MetricsExportResult.SUCCESS)
-        self.assertEqual(mock_post.call_count, 1)
-
-        result = self.exporter.export([])
-        self.assertIs(result, MetricsExportResult.SUCCESS)
-
-    def test_invalid_export(self):
-        record = ExportRecord(None, None, None, None)
-        result = self.exporter.export([record])
-        self.assertIs(result, MetricsExportResult.FAILURE)
-
-    @patch("requests.post")
-    def test_valid_send_message(self, mock_post):
-        mock_post.return_value.configure_mock(**{"ok": True})
-        result = self.exporter._send_message(bytes(), {})
-        self.assertEqual(mock_post.call_count, 1)
-        self.assertEqual(result, MetricsExportResult.SUCCESS)
-
-    def test_invalid_send_message(self):
-        result = self.exporter._send_message(bytes(), {})
-        self.assertEqual(result, MetricsExportResult.FAILURE)
-
-    # Verifies that build_message calls snappy.compress and returns SerializedString
-    @patch("snappy.compress", return_value=bytes())
-    def test_build_message(self, mock_compress):
-        message = self.exporter._build_message([TimeSeries()])
-        self.assertEqual(mock_compress.call_count, 1)
-        self.assertIsInstance(message, bytes)
-
-    # Ensure correct headers are added when valid config is provided
-    def test_build_headers(self):
-        self.exporter.headers = {"Custom Header": "test_header"}
-
-        headers = self.exporter._build_headers()
-        self.assertEqual(headers["Content-Encoding"], "snappy")
-        self.assertEqual(headers["Content-Type"], "application/x-protobuf")
-        self.assertEqual(headers["X-Prometheus-Remote-Write-Version"], "0.1.0")
-        self.assertEqual(headers["Custom Header"], "test_header")
+    # Assumed a "None" for Scope or Resource aren't valid, so build them here
+    scope = ScopeMetrics(
+        InstrumentationScope(name="prom-rw-test"),
+        [metric],
+        None
+    )
+    resource = ResourceMetrics(
+        Resource({"service.name" : "foo"}),
+        [scope],
+        None
+    )
+    record = MetricsData([resource])
+
+    result = prom_rw.export(record)
+    assert result == MetricExportResult.SUCCESS
+    assert mock_post.call_count == 1
+
+    result = prom_rw.export([])
+    assert result == MetricExportResult.SUCCESS
+
+def test_invalid_export(prom_rw):
+    record = MetricsData([])
+
+    result = prom_rw.export(record)
+    assert result == MetricExportResult.FAILURE
+
+@patch("requests.post")
+def test_valid_send_message(mock_post,prom_rw):
+    mock_post.return_value.configure_mock(**{"ok": True})
+    result = prom_rw._send_message(bytes(), {})
+    assert mock_post.call_count == 1
+    assert result == MetricExportResult.SUCCESS
+
+def test_invalid_send_message(prom_rw):
+    result = prom_rw._send_message(bytes(), {})
+    assert result == MetricExportResult.FAILURE
+
+# Verifies that build_message calls snappy.compress and returns SerializedString
+@patch("snappy.compress", return_value=bytes())
+def test_build_message(mock_compress,prom_rw):
+    message = prom_rw._build_message([TimeSeries()])
+    assert mock_compress.call_count == 1
+    assert isinstance(message, bytes)
+
+# Ensure correct headers are added when valid config is provided
+def test_build_headers(prom_rw):
+    prom_rw.headers = {"Custom Header": "test_header"}
+
+    headers = prom_rw._build_headers()
+    assert headers["Content-Encoding"] == "snappy"
+    assert headers["Content-Type"] == "application/x-protobuf"
+    assert headers["X-Prometheus-Remote-Write-Version"] == "0.1.0"
+    assert headers["Custom Header"] == "test_header"
```
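For reference, the Resource/Scope wrapping that `test_valid_export` builds by hand can serve as a template for feeding a single Metric to the exporter outside the SDK's reader pipeline. A sketch using the same imports as the test module (the helper name is hypothetical):

```python
from opentelemetry.sdk.metrics.export import (
    MetricsData,
    ResourceMetrics,
    ScopeMetrics,
)
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.util.instrumentation import InstrumentationScope

def wrap_metric(metric):
    # Wrap one Metric in the Scope/Resource containers that export() expects.
    scope = ScopeMetrics(InstrumentationScope(name="prom-rw-test"), [metric], None)
    resource = ResourceMetrics(Resource({"service.name": "foo"}), [scope], None)
    return MetricsData([resource])
```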
