import unittest
from unittest.mock import patch
+import snappy

from opentelemetry.exporter.prometheus_remote_write import (
    PrometheusRemoteWriteMetricsExporter,
    TimeSeries,
)
from opentelemetry.sdk.metrics import Counter
-#from opentelemetry.sdk.metrics.export import ExportRecord, MetricExportResult
+#from opentelemetry.sdk.metrics.export import MetricExportResult
#from opentelemetry.sdk.metrics.export.aggregate import (
#    HistogramAggregator,
#    LastValueAggregator,
from opentelemetry.sdk.metrics.export import (
    NumberDataPoint,
+    HistogramDataPoint,
+    Histogram,
+    MetricsData,
+    ScopeMetrics,
+    ResourceMetrics,
+    MetricExportResult,
)
+
+from opentelemetry.sdk.util.instrumentation import InstrumentationScope
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.util import get_dict_as_key
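
These tests rely on pytest fixtures, prom_rw and an indirectly parametrized
metric, that are defined outside the hunks shown here (e.g. in a conftest.py).
A minimal sketch of what prom_rw presumably provides, reusing the endpoint the
old unittest-style setUp used; the real fixture may differ:

    import pytest

    @pytest.fixture
    def prom_rw():
        # Hypothetical fixture; the actual definition is not part of this diff.
        return PrometheusRemoteWriteMetricsExporter(endpoint="/prom/test_endpoint")
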
@@ -51,39 +60,89 @@ def test_parse_data_point(prom_rw):
        timestamp,
        value
    )
-    labels, sample = prom_rw._parse_data_point(dp)
-    assert labels == (("Foo", "Bar"), ("Baz", 42))
+    name = "abc123_42"
+    labels, sample = prom_rw._parse_data_point(dp, name)
+
+    assert labels == (("Foo", "Bar"), ("Baz", 42), ("__name__", name))
    assert sample == (value, timestamp // 1_000_000)

+def test_parse_histogram_dp(prom_rw):
+    attrs = {"foo": "bar", "baz": 42}
+    timestamp = 1641946016139533244
+    bounds = [10.0, 20.0]
+    dp = HistogramDataPoint(
+        attributes=attrs,
+        start_time_unix_nano=1641946016139533244,
+        time_unix_nano=timestamp,
+        count=9,
+        sum=180,
+        bucket_counts=[1, 4, 4],
+        explicit_bounds=bounds,
+        min=8,
+        max=80,
+    )
+    name = "foo_histogram"
+    label_sample_pairs = prom_rw._parse_histogram_data_point(dp, name)
+    timestamp = timestamp // 1_000_000
+    bounds.append("+Inf")
+    for pos, bound in enumerate(bounds):
+        # We have two attributes; we assume the bucket ("le") label comes last.
+        assert ("le", str(bound)) == label_sample_pairs[pos][0][-1]
+        # Check that the bucket counts made it into the samples.
+        assert (dp.bucket_counts[pos], timestamp) == label_sample_pairs[pos][1]
+
+    # The last two entries are the sum & total count.
+    pos += 1
+    assert ("__name__", f"{name}_sum") in label_sample_pairs[pos][0]
+    assert (dp.sum, timestamp) == label_sample_pairs[pos][1]
+
+    pos += 1
+    assert ("__name__", f"{name}_count") in label_sample_pairs[pos][0]
+    assert (dp.count, timestamp) == label_sample_pairs[pos][1]
+
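
Given the data point above, _parse_histogram_data_point is expected to return
one (labels, sample) pair per bucket plus trailing pairs for the _sum and
_count series. A sketch of the assumed layout, with timestamps already
truncated to milliseconds and the non-"le" labels elided:

    # [
    #     ((..., ("le", "10.0")),                      (1, 1641946016139)),
    #     ((..., ("le", "20.0")),                      (4, 1641946016139)),
    #     ((..., ("le", "+Inf")),                      (4, 1641946016139)),
    #     ((..., ("__name__", "foo_histogram_sum")),   (180, 1641946016139)),
    #     ((..., ("__name__", "foo_histogram_count")), (9, 1641946016139)),
    # ]
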
@pytest.mark.parametrize("metric", [
    "gauge",
    "sum",
+    "histogram",
], indirect=["metric"])
def test_parse_metric(metric, prom_rw):
-    # We have 1 data point & 5 labels total
+    """
+    Ensure the output of _parse_metric is a list of TimeSeries with the
+    expected data and size.
+    """
    attributes = {
        "service": "foo",
-        "id": 42,
+        "bool": True,
    }

+    assert len(metric.data.data_points) == 1, "We only support a single data point in these tests"
    series = prom_rw._parse_metric(metric, tuple(attributes.items()))
-    assert len(series) == 1
-
-    #Build out the expected attributes and check they all made it as labels
-    proto_out = series[0]
-    number_data_point = metric.data.data_points[0]
-    attributes.update(number_data_point.attributes)
-    attributes["__name__"] = metric.name + f"_{metric.unit}"
-
-    for label in proto_out.labels:
-        assert label.value == str(attributes[label.name])
-
-    # Ensure we have one sample with the correct time & value
-    assert len(series.samples) == 1
-    sample = proto_out.samples[0]
-    assert sample.timestamp == (number_data_point.time_unix_nano // 1_000_000)
-    assert sample.value == number_data_point.value
-
+    timestamp = metric.data.data_points[0].time_unix_nano // 1_000_000
+    for single_series in series:
+        labels = str(single_series.labels)
+        # It's a bit easier to validate these stringified, where we don't have
+        # to worry about ordering or the protobuf TimeSeries object structure.
+        # This doesn't guarantee the labels aren't mixed up, but our other
+        # test cases already cover that.
+        assert "__name__" in labels
+        assert metric.name in labels
+        combined_attrs = list(attributes.items()) + list(metric.data.data_points[0].attributes.items())
+        for name, value in combined_attrs:
+            assert name in labels
+            assert str(value) in labels
+        if isinstance(metric.data, Histogram):
+            values = [
+                metric.data.data_points[0].count,
+                metric.data.data_points[0].sum,
+                metric.data.data_points[0].bucket_counts[0],
+                metric.data.data_points[0].bucket_counts[1],
+            ]
+        else:
+            values = [
+                metric.data.data_points[0].value,
+            ]
+        for sample in single_series.samples:
+            assert sample.timestamp == timestamp
+            assert sample.value in values
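
The metric argument arrives through indirect parametrization (note
indirect=["metric"] above), so a fixture builds a full gauge, sum, or
histogram Metric for each case. A sketch of how such a fixture could dispatch
on the parameter; the construction details are hypothetical and live outside
this diff:

    import pytest

    @pytest.fixture
    def metric(request):
        # Hypothetical: return an SDK Metric whose .data is a Gauge, Sum, or
        # Histogram (each with a single data point) based on request.param,
        # which is one of "gauge", "sum", or "histogram".
        ...
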


class TestValidation(unittest.TestCase):
@@ -179,287 +238,64 @@ def test_invalid_tls_config_key_only_param(self):
        )


-class TestConversion(unittest.TestCase):
-    # Initializes test data that is reused across tests
-    def setUp(self):
-        self.exporter = PrometheusRemoteWriteMetricsExporter(
-            endpoint="/prom/test_endpoint"
-        )
-
-    # Ensures conversion to timeseries function works with valid aggregation types
-    def test_valid_convert_to_timeseries(self):
-        test_records = [
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                SumAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                MinMaxSumCountAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                HistogramAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                LastValueAggregator(),
-                Resource({}),
-            ),
-            ExportRecord(
-                Counter("testname", "testdesc", "testunit", int, None),
-                None,
-                ValueObserverAggregator(),
-                Resource({}),
-            ),
-        ]
-        for record in test_records:
-            record.aggregator.update(5)
-            record.aggregator.take_checkpoint()
-        data = self.exporter._convert_to_timeseries(test_records)
-        self.assertIsInstance(data, list)
-        self.assertEqual(len(data), 13)
-        for timeseries in data:
-            self.assertIsInstance(timeseries, TimeSeries)
-
-    # Ensures conversion to timeseries fails for unsupported aggregation types
-    def test_invalid_convert_to_timeseries(self):
-        data = self.exporter._convert_to_timeseries(
-            [ExportRecord(None, None, None, Resource({}))]
-        )
-        self.assertIsInstance(data, list)
-        self.assertEqual(len(data), 0)
-
-    # Ensures sum aggregator is correctly converted to timeseries
-    def test_convert_from_sum(self):
-        sum_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            SumAggregator(),
-            Resource({}),
-        )
-        sum_record.aggregator.update(3)
-        sum_record.aggregator.update(2)
-        sum_record.aggregator.take_checkpoint()
-
-        expected_timeseries = self.exporter._create_timeseries(
-            sum_record, "testname_sum", 5.0
-        )
-        timeseries = self.exporter._convert_from_sum(sum_record)
-        self.assertEqual(timeseries[0], expected_timeseries)
-
-    # Ensures sum min_max_count aggregator is correctly converted to timeseries
-    def test_convert_from_min_max_sum_count(self):
-        min_max_sum_count_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            MinMaxSumCountAggregator(),
-            Resource({}),
-        )
-        min_max_sum_count_record.aggregator.update(5)
-        min_max_sum_count_record.aggregator.update(1)
-        min_max_sum_count_record.aggregator.take_checkpoint()
-
-        expected_min_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_min", 1.0
-        )
-        expected_max_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_max", 5.0
-        )
-        expected_sum_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_sum", 6.0
-        )
-        expected_count_timeseries = self.exporter._create_timeseries(
-            min_max_sum_count_record, "testname_count", 2.0
-        )
-
-        timeseries = self.exporter._convert_from_min_max_sum_count(
-            min_max_sum_count_record
-        )
-        self.assertEqual(timeseries[0], expected_min_timeseries)
-        self.assertEqual(timeseries[1], expected_max_timeseries)
-        self.assertEqual(timeseries[2], expected_sum_timeseries)
-        self.assertEqual(timeseries[3], expected_count_timeseries)
-
-    # Ensures histogram aggregator is correctly converted to timeseries
-    def test_convert_from_histogram(self):
-        histogram_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            HistogramAggregator(),
-            Resource({}),
-        )
-        histogram_record.aggregator.update(5)
-        histogram_record.aggregator.update(2)
-        histogram_record.aggregator.update(-1)
-        histogram_record.aggregator.take_checkpoint()
+# Ensures export is successful with valid export_records and config
+@patch("requests.post")
+def test_valid_export(mock_post, prom_rw, metric):
+    metric = metric
+    mock_post.return_value.configure_mock(**{"status_code": 200})
+    labels = get_dict_as_key({"environment": "testing"})
-
-        expected_le_0_timeseries = self.exporter._create_timeseries(
-            histogram_record, "testname_histogram", 1.0, ("le", "0")
-        )
-        expected_le_inf_timeseries = self.exporter._create_timeseries(
-            histogram_record, "testname_histogram", 2.0, ("le", "+Inf")
-        )
-        timeseries = self.exporter._convert_from_histogram(histogram_record)
-        self.assertEqual(timeseries[0], expected_le_0_timeseries)
-        self.assertEqual(timeseries[1], expected_le_inf_timeseries)
-
-    # Ensures last value aggregator is correctly converted to timeseries
-    def test_convert_from_last_value(self):
-        last_value_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            LastValueAggregator(),
-            Resource({}),
-        )
-        last_value_record.aggregator.update(1)
-        last_value_record.aggregator.update(5)
-        last_value_record.aggregator.take_checkpoint()
-
-        expected_timeseries = self.exporter._create_timeseries(
-            last_value_record, "testname_last", 5.0
-        )
-        timeseries = self.exporter._convert_from_last_value(last_value_record)
-        self.assertEqual(timeseries[0], expected_timeseries)
-
-    # Ensures value observer aggregator is correctly converted to timeseries
-    def test_convert_from_value_observer(self):
-        value_observer_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            None,
-            ValueObserverAggregator(),
-            Resource({}),
-        )
-        value_observer_record.aggregator.update(5)
-        value_observer_record.aggregator.update(1)
-        value_observer_record.aggregator.update(2)
-        value_observer_record.aggregator.take_checkpoint()
-
-        expected_min_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_min", 1.0
-        )
-        expected_max_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_max", 5.0
-        )
-        expected_sum_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_sum", 8.0
-        )
-        expected_count_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_count", 3.0
-        )
-        expected_last_timeseries = self.exporter._create_timeseries(
-            value_observer_record, "testname_last", 2.0
-        )
-        timeseries = self.exporter._convert_from_value_observer(
-            value_observer_record
-        )
-        self.assertEqual(timeseries[0], expected_min_timeseries)
-        self.assertEqual(timeseries[1], expected_max_timeseries)
-        self.assertEqual(timeseries[2], expected_sum_timeseries)
-        self.assertEqual(timeseries[3], expected_count_timeseries)
-        self.assertEqual(timeseries[4], expected_last_timeseries)
-
-    # Ensures quantile aggregator is correctly converted to timeseries
-    # TODO: Add test_convert_from_quantile once method is implemented
-
-    # Ensures timeseries produced contains appropriate sample and labels
-    def test_create_timeseries(self):
-        def create_label(name, value):
-            label = Label()
-            label.name = name
-            label.value = value
-            return label
-
-        sum_aggregator = SumAggregator()
-        sum_aggregator.update(5)
-        sum_aggregator.take_checkpoint()
-        export_record = ExportRecord(
-            Counter("testname", "testdesc", "testunit", int, None),
-            get_dict_as_key({"record_name": "record_value"}),
-            sum_aggregator,
-            Resource({"resource_name": "resource_value"}),
-        )
-
-        expected_timeseries = TimeSeries()
-        expected_timeseries.labels.append(  # pylint:disable=E1101
-            create_label("__name__", "testname")
-        )
-        expected_timeseries.labels.append(  # pylint:disable=E1101
-            create_label("resource_name", "resource_value")
-        )
-        expected_timeseries.labels.append(  # pylint:disable=E1101
-            create_label("record_name", "record_value")
-        )
-
-        sample = expected_timeseries.samples.add()  # pylint:disable=E1101
-        sample.timestamp = int(sum_aggregator.last_update_timestamp / 1000000)
-        sample.value = 5.0
-
-        timeseries = self.exporter._create_timeseries(
-            export_record, "testname", 5.0
-        )
-        self.assertEqual(timeseries, expected_timeseries)
-
-
-class TestExport(unittest.TestCase):
-    # Initializes test data that is reused across tests
-    def setUp(self):
-        self.exporter = PrometheusRemoteWriteMetricsExporter(
-            endpoint="/prom/test_endpoint"
-        )
-
-    # Ensures export is successful with valid export_records and config
-    @patch("requests.post")
-    def test_valid_export(self, mock_post):
-        mock_post.return_value.configure_mock(**{"status_code": 200})
-        test_metric = Counter("testname", "testdesc", "testunit", int, None)
-        labels = get_dict_as_key({"environment": "testing"})
-        record = ExportRecord(
-            test_metric, labels, SumAggregator(), Resource({})
-        )
-        result = self.exporter.export([record])
-        self.assertIs(result, MetricsExportResult.SUCCESS)
-        self.assertEqual(mock_post.call_count, 1)
-
-        result = self.exporter.export([])
-        self.assertIs(result, MetricsExportResult.SUCCESS)
-
-    def test_invalid_export(self):
-        record = ExportRecord(None, None, None, None)
-        result = self.exporter.export([record])
-        self.assertIs(result, MetricsExportResult.FAILURE)
-
-    @patch("requests.post")
-    def test_valid_send_message(self, mock_post):
-        mock_post.return_value.configure_mock(**{"ok": True})
-        result = self.exporter._send_message(bytes(), {})
-        self.assertEqual(mock_post.call_count, 1)
-        self.assertEqual(result, MetricsExportResult.SUCCESS)
-
-    def test_invalid_send_message(self):
-        result = self.exporter._send_message(bytes(), {})
-        self.assertEqual(result, MetricsExportResult.FAILURE)
-
-    # Verifies that build_message calls snappy.compress and returns SerializedString
-    @patch("snappy.compress", return_value=bytes())
-    def test_build_message(self, mock_compress):
-        message = self.exporter._build_message([TimeSeries()])
-        self.assertEqual(mock_compress.call_count, 1)
-        self.assertIsInstance(message, bytes)
-
-    # Ensure correct headers are added when valid config is provided
-    def test_build_headers(self):
-        self.exporter.headers = {"Custom Header": "test_header"}
-
-        headers = self.exporter._build_headers()
-        self.assertEqual(headers["Content-Encoding"], "snappy")
-        self.assertEqual(headers["Content-Type"], "application/x-protobuf")
-        self.assertEqual(headers["X-Prometheus-Remote-Write-Version"], "0.1.0")
-        self.assertEqual(headers["Custom Header"], "test_header")
+    # Assume a "None" Scope or Resource isn't valid, so build real ones here
+    scope = ScopeMetrics(
+        InstrumentationScope(name="prom-rw-test"),
+        [metric],
+        None,
+    )
+    resource = ResourceMetrics(
+        Resource({"service.name": "foo"}),
+        [scope],
+        None,
+    )
+    record = MetricsData([resource])
+
+    result = prom_rw.export(record)
+    assert result == MetricExportResult.SUCCESS
+    assert mock_post.call_count == 1
+
+    result = prom_rw.export([])
+    assert result == MetricExportResult.SUCCESS
+
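
For orientation, the export call above hands the exporter a fully nested SDK
metrics tree rather than the old flat record list. A sketch of the nesting as
built in this test, with field lists abbreviated:

    # MetricsData(resource_metrics=[...])
    # └── ResourceMetrics(resource=Resource(...), scope_metrics=[...], schema_url=None)
    #     └── ScopeMetrics(scope=InstrumentationScope(...), metrics=[...], schema_url=None)
    #         └── Metric(..., data=Gauge | Sum | Histogram)
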
+def test_invalid_export(prom_rw):
+    record = MetricsData([])
+
+    result = prom_rw.export(record)
+    assert result == MetricExportResult.FAILURE
+
+@patch("requests.post")
+def test_valid_send_message(mock_post, prom_rw):
+    mock_post.return_value.configure_mock(**{"ok": True})
+    result = prom_rw._send_message(bytes(), {})
+    assert mock_post.call_count == 1
+    assert result == MetricExportResult.SUCCESS
+
+def test_invalid_send_message(prom_rw):
+    result = prom_rw._send_message(bytes(), {})
+    assert result == MetricExportResult.FAILURE
+
+# Verifies that build_message calls snappy.compress and returns SerializedString
+@patch("snappy.compress", return_value=bytes())
+def test_build_message(mock_compress, prom_rw):
+    message = prom_rw._build_message([TimeSeries()])
+    assert mock_compress.call_count == 1
+    assert isinstance(message, bytes)
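
Prometheus remote_write expects the request body to be a snappy-compressed,
serialized protobuf WriteRequest, which is why snappy.compress is patched
above. A minimal sketch of a _build_message-style helper under that
assumption; the generated-protobuf import path is assumed and the exporter's
actual implementation may differ:

    import snappy
    from opentelemetry.exporter.prometheus_remote_write.gen.remote_pb2 import (
        WriteRequest,  # assumed location of the generated remote-write types
    )

    def build_message(timeseries) -> bytes:
        # Pack all TimeSeries into one WriteRequest, serialize it, then
        # snappy-compress the bytes as the remote-write protocol requires.
        write_request = WriteRequest()
        write_request.timeseries.extend(timeseries)
        return snappy.compress(write_request.SerializeToString())
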
+
+# Ensure correct headers are added when valid config is provided
+def test_build_headers(prom_rw):
+    prom_rw.headers = {"Custom Header": "test_header"}
+
+    headers = prom_rw._build_headers()
+    assert headers["Content-Encoding"] == "snappy"
+    assert headers["Content-Type"] == "application/x-protobuf"
+    assert headers["X-Prometheus-Remote-Write-Version"] == "0.1.0"
+    assert headers["Custom Header"] == "test_header"