Skip to content

Commit 695761c

Browse files
committed
feat(logger): add flush_metrics
1 parent 0d2d061 commit 695761c

File tree

4 files changed

+55
-18
lines changed

4 files changed

+55
-18
lines changed

Diff for: aws_lambda_powertools/metrics/base.py

+23-10
Original file line numberDiff line numberDiff line change
@@ -328,6 +328,28 @@ def clear_metrics(self) -> None:
328328
self.dimension_set.clear()
329329
self.metadata_set.clear()
330330

331+
def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
332+
"""Manually flushes the metrics. This is normally not necessary,
333+
unless you're running on other runtimes besides Lambda, where the @log_metrics
334+
decorator already handles things for you.
335+
336+
Parameters
337+
----------
338+
raise_on_empty_metrics : bool, optional
339+
raise exception if no metrics are emitted, by default False
340+
"""
341+
if not raise_on_empty_metrics and not self.metric_set:
342+
warnings.warn(
343+
"No application metrics to publish. The cold-start metric may be published if enabled. "
344+
"If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
345+
stacklevel=2,
346+
)
347+
else:
348+
logger.debug("Flushing existing metrics")
349+
metrics = self.serialize_metric_set()
350+
print(json.dumps(metrics, separators=(",", ":")))
351+
self.clear_metrics()
352+
331353
def log_metrics(
332354
self,
333355
lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None,
@@ -390,16 +412,7 @@ def decorate(event, context):
390412
if capture_cold_start_metric:
391413
self._add_cold_start_metric(context=context)
392414
finally:
393-
if not raise_on_empty_metrics and not self.metric_set:
394-
warnings.warn(
395-
"No application metrics to publish. The cold-start metric may be published if enabled. "
396-
"If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
397-
stacklevel=2,
398-
)
399-
else:
400-
metrics = self.serialize_metric_set()
401-
self.clear_metrics()
402-
print(json.dumps(metrics, separators=(",", ":")))
415+
self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics)
403416

404417
return response
405418

Diff for: docs/core/metrics.md

+11-3
Original file line numberDiff line numberDiff line change
@@ -256,9 +256,17 @@ If you prefer not to use `log_metrics` because you might want to encapsulate add
256256
???+ warning
257257
Metrics, dimensions and namespace validation still applies
258258

259-
```python hl_lines="11-14" title="Manually flushing and clearing metrics from memory"
260-
--8<-- "examples/metrics/src/single_metric.py"
261-
```
259+
=== "Regular Metrics"
260+
261+
```python hl_lines="10"
262+
--8<-- "examples/metrics/src/flush_metrics.py"
263+
```
264+
265+
=== "Single Metrics"
266+
267+
```python hl_lines="11-14"
268+
--8<-- "examples/metrics/src/single_metric.py"
269+
```
262270

263271
### Metrics isolation
264272

Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
import json
2-
31
from aws_lambda_powertools import Metrics
42
from aws_lambda_powertools.metrics import MetricUnit
53
from aws_lambda_powertools.utilities.typing import LambdaContext
@@ -9,6 +7,4 @@
97

108
def lambda_handler(event: dict, context: LambdaContext):
119
metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
12-
your_metrics_object = metrics.serialize_metric_set()
13-
metrics.clear_metrics()
14-
print(json.dumps(your_metrics_object))
10+
metrics.flush_metrics()

Diff for: tests/functional/test_metrics.py

+20
Original file line numberDiff line numberDiff line change
@@ -249,6 +249,26 @@ def lambda_handler(evt, ctx):
249249
assert expected == output
250250

251251

252+
def test_log_metrics_manual_flush(capsys, metrics, dimensions, namespace):
253+
# GIVEN Metrics is initialized
254+
my_metrics = Metrics(namespace=namespace)
255+
for metric in metrics:
256+
my_metrics.add_metric(**metric)
257+
for dimension in dimensions:
258+
my_metrics.add_dimension(**dimension)
259+
260+
# WHEN we manually flush the metrics
261+
my_metrics.flush_metrics()
262+
263+
output = capture_metrics_output(capsys)
264+
expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)
265+
266+
# THEN we should have no exceptions
267+
# and a valid EMF object should be flushed correctly
268+
remove_timestamp(metrics=[output, expected])
269+
assert expected == output
270+
271+
252272
def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace):
253273
# GIVEN POWERTOOLS_METRICS_NAMESPACE is set
254274
monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", namespace)

0 commit comments

Comments
 (0)