From c9409662703c80fa06c1fc3fbbf30bcf00f2887c Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 1 Aug 2023 15:42:55 -0700 Subject: [PATCH 01/37] add datadog provider refactor base and exception to resolve a circular import issue add datadog provider tests --- aws_lambda_powertools/metrics/exceptions.py | 14 +- .../metrics/provider/__init__.py | 3 + .../metrics/provider/base/__init__.py | 11 + .../metrics/provider/{ => base}/base.py | 0 .../metrics/provider/base/exceptions.py | 10 + .../provider/cloudwatch_emf/cloudwatch.py | 2 +- .../metrics/provider/datadog/__init__.py | 6 + .../metrics/provider/datadog/datadog.py | 261 ++++++++++++++++++ tests/functional/test_metrics.py | 92 ++++++ 9 files changed, 385 insertions(+), 14 deletions(-) create mode 100644 aws_lambda_powertools/metrics/provider/base/__init__.py rename aws_lambda_powertools/metrics/provider/{ => base}/base.py (100%) create mode 100644 aws_lambda_powertools/metrics/provider/base/exceptions.py create mode 100644 aws_lambda_powertools/metrics/provider/datadog/__init__.py create mode 100644 aws_lambda_powertools/metrics/provider/datadog/datadog.py diff --git a/aws_lambda_powertools/metrics/exceptions.py b/aws_lambda_powertools/metrics/exceptions.py index 30a4996d67e..9e9ed5bb0db 100644 --- a/aws_lambda_powertools/metrics/exceptions.py +++ b/aws_lambda_powertools/metrics/exceptions.py @@ -1,16 +1,4 @@ +from aws_lambda_powertools.metrics.provider.base.exceptions import MetricValueError, SchemaValidationError from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import MetricResolutionError, MetricUnitError - -class SchemaValidationError(Exception): - """When serialization fail schema validation""" - - pass - - -class MetricValueError(Exception): - """When metric value isn't a valid number""" - - pass - - __all__ = ["MetricUnitError", "MetricResolutionError", "SchemaValidationError", "MetricValueError"] diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index 814812c135b..e57b25a806b 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -1,6 +1,9 @@ from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider __all__ = [ "MetricsBase", "MetricsProviderBase", + "DatadogMetrics", + "DatadogProvider", ] diff --git a/aws_lambda_powertools/metrics/provider/base/__init__.py b/aws_lambda_powertools/metrics/provider/base/__init__.py new file mode 100644 index 00000000000..80e8be96904 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/base/__init__.py @@ -0,0 +1,11 @@ +from aws_lambda_powertools.metrics.provider.base.base import ( + MetricsBase, + MetricsProviderBase, + reset_cold_start_flag_provider, +) + +__all__ = [ + "MetricsBase", + "MetricsProviderBase", + "reset_cold_start_flag_provider", +] diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base/base.py similarity index 100% rename from aws_lambda_powertools/metrics/provider/base.py rename to aws_lambda_powertools/metrics/provider/base/base.py diff --git a/aws_lambda_powertools/metrics/provider/base/exceptions.py b/aws_lambda_powertools/metrics/provider/base/exceptions.py new file mode 100644 index 00000000000..7ec8170399f --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/base/exceptions.py @@ -0,0 +1,10 @@ +class SchemaValidationError(Exception): 
+    """When serialization fails schema validation"""
+
+    pass
+
+
+class MetricValueError(Exception):
+    """When metric value isn't a valid number"""
+
+    pass
diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
index 921fcee6045..6ccc1edb06d 100644
--- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
+++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py
@@ -11,8 +11,8 @@
 from typing import Any, Callable, Dict, List, Optional, Union
 
 from aws_lambda_powertools.metrics.base import single_metric
-from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError
 from aws_lambda_powertools.metrics.provider import MetricsProviderBase
+from aws_lambda_powertools.metrics.provider.base.exceptions import MetricValueError, SchemaValidationError
 from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start
 from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS
 from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import (
diff --git a/aws_lambda_powertools/metrics/provider/datadog/__init__.py b/aws_lambda_powertools/metrics/provider/datadog/__init__.py
new file mode 100644
index 00000000000..ecd497e420e
--- /dev/null
+++ b/aws_lambda_powertools/metrics/provider/datadog/__init__.py
@@ -0,0 +1,6 @@
+from aws_lambda_powertools.metrics.provider.datadog.datadog import DatadogMetrics, DatadogProvider
+
+__all__ = [
+    "DatadogMetrics",
+    "DatadogProvider",
+]
diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py
new file mode 100644
index 00000000000..e44bd64df1f
--- /dev/null
+++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py
@@ -0,0 +1,261 @@
+from __future__ import annotations
+
+import json
+import logging
+import numbers
+import os
+import time
+import warnings
+from typing import Any, List, Optional
+
+from aws_lambda_powertools.metrics.provider import MetricsBase
+from aws_lambda_powertools.metrics.provider.base.exceptions import MetricValueError, SchemaValidationError
+
+logger = logging.getLogger(__name__)
+
+# Check if using datadog layer
+try:
+    from datadog_lambda.metric import lambda_metric  # type: ignore
+except ImportError:
+    lambda_metric = None
+
+DEFAULT_NAMESPACE = "default"
+
+
+class DatadogProvider:
+    """
+    Datadog metrics provider. This class should only be used inside DatadogMetrics.
+    All Datadog metric data is stored as
+    {
+        "m": metric_name,
+        "v": value,
+        "e": timestamp,
+        "t": List["tag:value","tag2:value2"]
+    }
+    see https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = False):
+        """
+
+        Parameters
+        ----------
+        namespace: str
+            For Datadog, the namespace is prepended to the exported metric name
+            (namespace.metrics_name)
+        flush_to_log: bool
+            Flush Datadog metrics to log (collected by the log forwarder) rather than using the Datadog extension
+        """
+        self.metrics: List = []
+        self.namespace: str = namespace
+        # either is true then flush to log
+        self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log
+        super().__init__()
+
+    # add a metric entry with name, value, timestamp and tags
+    def add_metric(
+        self,
+        name: str,
+        value: float,
+        timestamp: Optional[int] = None,
+        tags: Optional[List] = None,
+        **kwargs: Any,
+    ) -> None:
+        """
+        Add a metric. This function is used by the DatadogMetrics class.
+
+        Parameters
+        ----------
+        name: str
+            Name/Key for the metric
+        value: float
+            Value for the metric
+        timestamp: int
+            Timestamp in seconds for the metric; defaults to time.time()
+        tags: List[str]
+            In format like List["tag:value","tag2:value2"]
+        args: Any
+            extra args will be dropped for compatibility
+        kwargs: Any
+            extra kwargs will be converted into tags, e.g., add_metric(sales="sam") -> tags=['sales:sam']
+
+        Examples
+        --------
+        >>> provider = DatadogProvider()
+        >>>
+        >>> provider.add_metric(
+        >>>     name='coffee_house.order_value',
+        >>>     value=12.45,
+        >>>     tags=['product:latte', 'order:online'],
+        >>>     sales='sam'
+        >>> )
+        """
+        if not isinstance(value, numbers.Real):
+            raise MetricValueError(f"{value} is not a valid number")
+        if tags is None:
+            tags = []
+        if not timestamp:
+            timestamp = int(time.time())
+        for k, w in kwargs.items():
+            tags.append(f"{k}:{w}")
+        self.metrics.append({"m": name, "v": value, "e": timestamp, "t": tags})
+
+    def serialize(self) -> List:
+        output_list: List = []
+
+        for single_metric in self.metrics:
+            if self.namespace != DEFAULT_NAMESPACE:
+                metric_name = f"{self.namespace}.{single_metric['m']}"
+            else:
+                metric_name = single_metric["m"]
+            output_list.append(
+                {
+                    "m": metric_name,
+                    "v": single_metric["v"],
+                    "e": single_metric["e"],
+                    "t": single_metric["t"],
+                },
+            )
+
+        return output_list
+
+    # flush serialized data to output
+    def flush(self, metrics: List):
+        """
+
+        Parameters
+        ----------
+        metrics: List[Dict]
+            [{
+                "m": metric_name,
+                "v": value,
+                "e": timestamp,
+                "t": List["tag:value","tag2:value2"]
+            }]
+
+        Raises
+        -------
+        SchemaValidationError
+            When metric object fails schema validation
+        """
+        if len(metrics) == 0:
+            raise SchemaValidationError("Must contain at least one metric.")
+        # submit through datadog extension
+        if lambda_metric and self.flush_to_log is False:
+            # use lambda_metric function from datadog package, submit metrics to datadog
+            for metric_item in metrics:
+                lambda_metric(
+                    metric_name=metric_item["m"],
+                    value=metric_item["v"],
+                    timestamp=metric_item["e"],
+                    tags=metric_item["t"],
+                )
+        else:
+            # extension not used (module missing or flush_to_log set): print in a format the Datadog log forwarder recognizes
+            # https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77
+            for metric_item in metrics:
+                print(json.dumps(metric_item, separators=(",", ":")))
+
+    def clear_metrics(self):
+        self.metrics = []
+
+
+class DatadogMetrics(MetricsBase):
+    """
+    Class for Datadog metrics
+
+    Parameters
+    ----------
+    provider: DatadogProvider
+        The Datadog provider which will be used to process metrics data
+
+    Example
+    -------
+    **Create a few metrics and publish them at the end of a function execution**
+
+    >>> from aws_lambda_powertools.metrics.provider import DatadogMetrics, DatadogProvider
+    >>>
+    >>> dd_provider = DatadogProvider(namespace="Serverlesspresso")
+    >>> metrics = 
DatadogMetrics(provider=dd_provider) + >>> + >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) + >>> def lambda_handler(event, context): + >>> metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online']) + """ + + # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` + def __init__(self, provider: DatadogProvider): + self.provider = provider + super().__init__() + + # drop additional kwargs to keep same experience + def add_metric( + self, + name: str, + value: float, + timestamp: Optional[int] = None, + tags: Optional[List] = None, + *args, + **kwargs, + ): + """ + The add_metrics function that will be used by metrics class. + + Parameters + ---------- + name: str + Name/Key for the metrics + value: float + Value for the metrics + timestamp: int + Timestamp in int for the metrics, default = time.time() + tags: List[str] + In format like List["tag:value","tag2:value2"], + args: Any + extra args will be dropped + kwargs: Any + extra kwargs will be converted into tags, e.g., add_metrics(sales=sam) -> tags=['sales:sam'] + + Examples + -------- + >>> from aws_lambda_powertools.metrics.provider import DatadogMetrics, DatadogProvider + >>> + >>> metrics = DatadogMetrics(provider=DatadogProvider()) + >>> metrics.add_metric( + >>> name='coffee_house.order_value', + >>> value=12.45, + >>> tags=['product:latte', 'order:online'] + >>> ) + """ + self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags, **kwargs) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + """ + Manually flushes the metrics. This is normally not necessary, + unless you're running on other runtimes besides Lambda, where the @log_metrics + decorator already handles things for you. + + Parameters + ---------- + raise_on_empty_metrics: bool + raise exception if no metrics are emitted, by default False + """ + metrics = self.provider.serialize() + if not metrics and not raise_on_empty_metrics: + warnings.warn( + "No application metrics to publish. The cold-start metric may be published if enabled. 
" + "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", + stacklevel=2, + ) + else: + # will raise on empty metrics + self.provider.flush(metrics) + self.provider.clear_metrics() + + def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: + logger.debug("Adding cold start metric and function_name tagging") + self.add_metric(name="ColdStart", value=1, function_name=function_name) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 1eed6c82294..d90c9fda1b8 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -1,4 +1,5 @@ import json +import os import warnings from collections import namedtuple from typing import Any, Dict, List, Union @@ -17,6 +18,8 @@ single_metric, ) from aws_lambda_powertools.metrics.provider import ( + DatadogMetrics, + DatadogProvider, MetricsBase, MetricsProviderBase, ) @@ -1407,3 +1410,92 @@ def lambda_handler(evt, context): assert output["ColdStart"] == [1.0] assert output["function_name"] == "example_fn" assert output["service"] == service + + +def test_datadog_coldstart(capsys): + reset_cold_start_flag_provider() + dd_provider = DatadogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DatadogMetrics(provider=dd_provider) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) + def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + + lambda_handler({}, LambdaContext("example_fn2")) + logs = capsys.readouterr().out.strip() + assert "ColdStart" in logs + + +def test_datadog_write_to_log(capsys): + os.environ["DD_FLUSH_TO_LOG"] = "True" + dd_provider = DatadogProvider(namespace="Serverlesspresso") + metrics = DatadogMetrics(provider=dd_provider) + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.flush_metrics() + logs = capture_metrics_output(capsys) + logs["e"] = "" + assert logs == json.loads('{"m":"Serverlesspresso.item_sold","v":1,"e":"","t":["product:latte","order:online"]}') + + +def test_datadog_namespace(capsys): + dd_provider = DatadogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DatadogMetrics(provider=dd_provider) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) + def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + + lambda_handler({}, LambdaContext("example_fn")) + logs = capsys.readouterr().out.strip() + assert "Serverlesspresso" in logs + + +def test_datadog_raise_on_empty(): + dd_provider = DatadogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DatadogMetrics(provider=dd_provider) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + @metrics.log_metrics(capture_cold_start_metric=False, raise_on_empty_metrics=True) + def lambda_handler(event, context): + pass + + with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): + lambda_handler({}, LambdaContext("example_fn")) + + +def test_datadog_args(capsys): + dd_provider = DatadogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DatadogMetrics(provider=dd_provider) + metrics.add_metric("order_valve", 12.45, sales="sam") + metrics.flush_metrics() + logs = 
capsys.readouterr().out.strip() + log_dict = json.loads(logs) + tag_list = log_dict.get("t") + assert "sales:sam" in tag_list + + +def test_datadog_kwargs(capsys): + dd_provider = DatadogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DatadogMetrics(provider=dd_provider) + metrics.add_metric( + name="order_valve", + value=12.45, + tags=["test:kwargs"], + str="str", + int=123, + float=45.6, + dict={"type": "termination identified"}, + ) + metrics.flush_metrics() + logs = capsys.readouterr().out.strip() + log_dict = json.loads(logs) + tag_list = log_dict.get("t") + assert "test:kwargs" in tag_list + assert "str:str" in tag_list + assert "int:123" in tag_list + assert "float:45.6" in tag_list From 8c21a9beba83325faf578d39ef81627e785f075f Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 1 Aug 2023 15:50:12 -0700 Subject: [PATCH 02/37] fix poetry lock --- poetry.lock | 271 +++++++++++++++++++++++++++++++++++++++++++++++-- pyproject.toml | 2 + 2 files changed, 264 insertions(+), 9 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1f0da79064c..129167e2de3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -291,17 +291,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.28.16" +version = "1.28.17" description = "The AWS SDK for Python" optional = false python-versions = ">= 3.7" files = [ - {file = "boto3-1.28.16-py3-none-any.whl", hash = "sha256:d8e31f69fb919025a5961f8fbeb51fe92e2f753beb37fc1853138667a231cdaa"}, - {file = "boto3-1.28.16.tar.gz", hash = "sha256:aea48aedf3e8676e598e3202e732295064a4fcad5f2d2d2a699368b8c3ab492c"}, + {file = "boto3-1.28.17-py3-none-any.whl", hash = "sha256:bca0526f819e0f19c0f1e6eba3e2d1d6b6a92a45129f98c0d716e5aab6d9444b"}, + {file = "boto3-1.28.17.tar.gz", hash = "sha256:90f7cfb5e1821af95b1fc084bc50e6c47fa3edc99f32de1a2591faa0c546bea7"}, ] [package.dependencies] -botocore = ">=1.31.16,<1.32.0" +botocore = ">=1.31.17,<1.32.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -310,13 +310,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.31.16" +version = "1.31.17" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">= 3.7" files = [ - {file = "botocore-1.31.16-py3-none-any.whl", hash = "sha256:92b240e2cb7b3afae5361651d2f48ee582f45d2dab53aef76eef7eec1d3ce582"}, - {file = "botocore-1.31.16.tar.gz", hash = "sha256:563e15979e763b93d78de58d0fc065f8615be12f41bab42f5ad9f412b6a224b3"}, + {file = "botocore-1.31.17-py3-none-any.whl", hash = "sha256:6ac34a1d34aa3750e78b77b8596617e2bab938964694d651939dba2cbde2c12b"}, + {file = "botocore-1.31.17.tar.gz", hash = "sha256:396459065dba4339eb4da4ec8b4e6599728eb89b7caaceea199e26f7d824a41c"}, ] [package.dependencies] @@ -327,6 +327,31 @@ urllib3 = ">=1.25.4,<1.27" [package.extras] crt = ["awscrt (==0.16.26)"] +[[package]] +name = "bytecode" +version = "0.13.0" +description = "Python module to generate and modify bytecode" +optional = false +python-versions = ">=3.6" +files = [ + {file = "bytecode-0.13.0-py3-none-any.whl", hash = "sha256:e69f92e7d27f99d5d7d76e6a824bd3d9ff857c72b59927aaf87e1a620f67fe50"}, + {file = "bytecode-0.13.0.tar.gz", hash = "sha256:6af3c2f0a31ce05dce41f7eea5cc380e33f5e8fbb7dcee3b52467a00acd52fcd"}, +] + +[[package]] +name = "bytecode" +version = "0.14.2" +description = "Python module to generate and modify bytecode" +optional = false +python-versions = ">=3.8" +files = [ + {file = "bytecode-0.14.2-py3-none-any.whl", hash = "sha256:e368a2b9bbd7c986133c951250db94fb32f774cfc49752a9db9073bcf9899762"}, + {file = "bytecode-0.14.2.tar.gz", hash = "sha256:386378d9025d68ddb144870ae74330a492717b11b8c9164c4034e88add808f0c"}, +] + +[package.dependencies] +typing-extensions = {version = "*", markers = "python_version < \"3.10\""} + [[package]] name = "cattrs" version = "23.1.2" @@ -598,6 +623,154 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1 [package.extras] toml = ["tomli"] +[[package]] +name = "datadog" +version = "0.46.0" +description = "The Datadog Python library" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "datadog-0.46.0-py2.py3-none-any.whl", hash = "sha256:3d7bcda6177b43be4cdb52e16b4bdd4f9005716c0dd7cfea009e018c36bb7a3d"}, + {file = "datadog-0.46.0.tar.gz", hash = "sha256:e4fbc92a85e2b0919a226896ae45fc5e4b356c0c57f1c2659659dfbe0789c674"}, +] + +[package.dependencies] +requests = ">=2.6.0" + +[[package]] +name = "datadog-lambda" +version = "4.77.0" +description = "The Datadog AWS Lambda Library" +optional = false +python-versions = ">=3.7.0,<4" +files = [ + {file = "datadog_lambda-4.77.0-py3-none-any.whl", hash = "sha256:d583969bc2c7fcb358e35107d130a5a8ce86f8370f38e8d7dacdeab2ffcf0808"}, + {file = "datadog_lambda-4.77.0.tar.gz", hash = "sha256:2e34ab4a17dcc5d18b917a10c46efe7665eb3271eff4feca92d1709d3e3aa4af"}, +] + +[package.dependencies] +datadog = ">=0.41.0,<1.0.0" +ddtrace = "1.15.2" +importlib_metadata = {version = "*", markers = "python_version < \"3.8\""} +typing_extensions = {version = ">=4.0,<5.0", markers = "python_version < \"3.8\""} +urllib3 = "<2.0.0" +wrapt = ">=1.11.2,<2.0.0" + +[package.extras] +dev = ["boto3 (>=1.10.33,<2.0.0)", "flake8 (>=3.7.9,<4.0.0)", "httpretty (>=0.9.7,<0.10.0)", "nose2 (>=0.9.1,<0.10.0)", "requests (>=2.22.0,<3.0.0)"] + +[[package]] +name = "ddsketch" +version = "2.0.4" +description = "Distributed quantile sketches" +optional = false +python-versions = ">=2.7" +files = [ + {file = "ddsketch-2.0.4-py3-none-any.whl", hash = "sha256:3227a270fd686a29d3a7128f9352ccf852314410380fc11384356f1ae2a75938"}, + {file = "ddsketch-2.0.4.tar.gz", hash = 
"sha256:32f7314077fec8747d4faebaec2c854b5ffc399c5f552f73fa94024f48d74d64"}, +] + +[package.dependencies] +protobuf = {version = ">=3.0.0", markers = "python_version >= \"3.7\""} +six = "*" + +[[package]] +name = "ddtrace" +version = "1.15.2" +description = "Datadog APM client library" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "ddtrace-1.15.2-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:ca0411333fbdb0fafa06d412bbd76ab8d2647cc9dcb8a7833952ce4fe09eb421"}, + {file = "ddtrace-1.15.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e2603749f97a5191b32f710c8ec5248bb58f4f9a1cb337559f93c5f0f8cea33b"}, + {file = "ddtrace-1.15.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8392087809e047f701e38ecc4f2990bcfe399a22c516a1dbcbdff50fb7382a79"}, + {file = "ddtrace-1.15.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:2f7649c24a7463be9b86d5f11ac6eaa2014896eaf409e67f3dc813a6bb0ed8b6"}, + {file = "ddtrace-1.15.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:dbdbc5bf3b2b56b8e61b241ee372d897b295344e269475f38e837c9bfe03ae2c"}, + {file = "ddtrace-1.15.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:23d39c72ad1844977a80d79206d773c3ec1f1346816b9e45427c25ef88597b4e"}, + {file = "ddtrace-1.15.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:da458bbbc4de14dd8e8f60aefe42a66c551a9f50c69c6e361acc7edab579a3e4"}, + {file = "ddtrace-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d07bb0c50d2df7ff9281bea83534db5127cee8ac2f94111c9544d03d49f60613"}, + {file = "ddtrace-1.15.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:499b3e3d0359e492792ecf8ab6efcf4b1991fbaa523338774333e9a2a66d9d37"}, + {file = "ddtrace-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eedd0937f83e0d7b261960365fec5771f39ced599c90f589548a1738a586799d"}, + {file = "ddtrace-1.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7643d20991cd7e1c21e65d8b5c292a9dca8d124f69f9e96cc2b5fb8d47802c3a"}, + {file = "ddtrace-1.15.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3f6bb76fe33c2e4842236036f78b1bbdd4da0f2b846627ca7d72b01ac49b3076"}, + {file = "ddtrace-1.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ebc9b89501c8a557dab43170e4a12e90358130413a87a0276ccaa0038b0636a4"}, + {file = "ddtrace-1.15.2-cp310-cp310-win32.whl", hash = "sha256:c10ca0e3a63310d314ec7fa55d53f4b4434f06c4d321d64d757814679161bf5d"}, + {file = "ddtrace-1.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:6208635442ea52ff3f97b9fc64ac25772cda8f105a607a385e55bf524bceefc5"}, + {file = "ddtrace-1.15.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:8524f460be02b402f63b11ad3b1177955c8608f814e1758b87f53f15bf9a7599"}, + {file = "ddtrace-1.15.2-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5a2dd127a65e12189055818ab72d44d80587acaaf450c65624e0482d63ff9970"}, + {file = "ddtrace-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3350c647120fbc3355eb35ce054c88e63bc073d71949f377d59b1152a2ed0f4"}, + {file = "ddtrace-1.15.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:adb76713207f0ef688f68a539f9cb63e19cd149d48d36befb835f67f49395ed7"}, + {file = "ddtrace-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8ff5c250c5abfbbbd76a7d3167308a2373ad7e55ecf3c7c26a62fcd2be8a57"}, + {file = "ddtrace-1.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:264bed998870b63f548b96f57dd771014cd02ef0b21bb382e745900a7b72ef28"}, + {file = "ddtrace-1.15.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:20bfd8db155167d2ccfddc25b50649338534b12cb00f7ed08514af1eb6a4956e"}, + {file = "ddtrace-1.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72479172bd10f5998188a05b0b4a109ccb2a93467a0aa1e6656d5396c83fb253"}, + {file = "ddtrace-1.15.2-cp311-cp311-win32.whl", hash = "sha256:23bee3d0eb971cc1565caa429620b82f2d69ef648e9c792046b9481188dba9ab"}, + {file = "ddtrace-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:65a29d23ecfbc7cc4ca1069a5586aa836ae3978e64251414933432078bc29bc2"}, + {file = "ddtrace-1.15.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:3a2852da4a76503211ca8b77a50fc86df36ba15fab04b45a6a17faa386f53839"}, + {file = "ddtrace-1.15.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:244180c6decb244c7fda929dc5969b3a510e5a4857239063de1fae139fac9837"}, + {file = "ddtrace-1.15.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:46f9ba0c282a62953f03d1add8eae8c80613244bb93a1ff997dad71d07ce6c72"}, + {file = "ddtrace-1.15.2-cp35-cp35m-win32.whl", hash = "sha256:a39dbf1ca657cc3a876143301e5d775e2f9bcf2ed1e9b4366fb3cf9d6a345a82"}, + {file = "ddtrace-1.15.2-cp35-cp35m-win_amd64.whl", hash = "sha256:7cfd9514e82871321e86897fe567c7548fc45da523df591f3e5adc6633a5781c"}, + {file = "ddtrace-1.15.2-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:3a2978b07d19d4ebf936fde1e455c61b3d88f103f1f9e360b9269fe1a1dc608a"}, + {file = "ddtrace-1.15.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e7795a7f65a6e844ab57a0b31d400e79c4a1f69d174fab8edc69e6d2db56962"}, + {file = "ddtrace-1.15.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aae5306b3b0ec48cb8ade3362629c31bd25999244addff0f4a2f6f3934509894"}, + {file = "ddtrace-1.15.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14fb33bd6d9fa70638d43de7b5170e1c9961d3fbc277314609941e108c45716d"}, + {file = "ddtrace-1.15.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:745ce3c9559fa331ef30208ff1ccaafe3ab3c02f2e01177c560c94acd6f4de27"}, + {file = "ddtrace-1.15.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7706d35215d2cca0a89581ec11da56e25742914ae0865b928034ee9ad7278cf3"}, + {file = "ddtrace-1.15.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0bc18e900d1495deb61093f1af38d94af6a1ca66dd541fd47bd093c3f3b80b4a"}, + {file = "ddtrace-1.15.2-cp36-cp36m-win32.whl", hash = "sha256:b13f4042ef3f391714aca5ca1f03ff3c24c1d201ab5af02f0405335aa5602ff5"}, + {file = "ddtrace-1.15.2-cp36-cp36m-win_amd64.whl", hash = "sha256:eb32e3b3d0f472447b3d427a075007135b3c39488c1fe0f1e097863f326a439b"}, + {file = "ddtrace-1.15.2-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:0953fd9a2555801d68674bb4905f64805efe1e02b3f11def21eb7655be046717"}, + {file = "ddtrace-1.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9436ec9cc00b70730d2c1777f11aca7f4863a49ddd27d0b1478e84c1a7667b6f"}, + {file = "ddtrace-1.15.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7215b21c1eaf56b38bf46c66193db3736ecadeb9ae1b9ca780a91addbaa9853"}, + {file = "ddtrace-1.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a5f7155b99fe9393bfa4f0e4ef2610ddf59e70aefcf99a95acae8b31e29cc4"}, + {file = "ddtrace-1.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:df103a600c2901dc54929ef58dee41887a0bb558efbf7e41a7489bd6264fcf44"}, + {file = 
"ddtrace-1.15.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d51a73238ad8ceff4232ffa94b860d61187b325e7fab746044dafa312d6bc415"}, + {file = "ddtrace-1.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bfc5777204c0c34465fc4ce38d8d1268d9f95ffcbf7e4025e9a5d3e87d3e17c3"}, + {file = "ddtrace-1.15.2-cp37-cp37m-win32.whl", hash = "sha256:9516dbfc974af9632d75e9c32b38e695b88ea18ebfa4580dd0f768bc05272fba"}, + {file = "ddtrace-1.15.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a510252a3d5be6c29db2c69cbd2535268532e8d568fae06b295a06041e1b969d"}, + {file = "ddtrace-1.15.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:82995243719c87aefc85d7df0e1ae61bba8ae1f805d48cbaf2132beb215f1968"}, + {file = "ddtrace-1.15.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:ca5dd51910a81451d236fccdbf5d3ca8e284aa3be56f08db92644f85ef88c56e"}, + {file = "ddtrace-1.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d76f485769c035df3ede4ad9830bac06aa8b69ac4617f2eb1251b1094468009"}, + {file = "ddtrace-1.15.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4dd5f5e477021b8810b2b685e1e16ba5a99f31239e22abc71794688b7f3e6e4d"}, + {file = "ddtrace-1.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ec73676c60cc3cf08430f19a59daccbbb5770edc74ad15a99bf4237a40d0fb"}, + {file = "ddtrace-1.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6b140f11b89d902174df05e8b9c1eb1b522a63e6c60c5d68ccac8913bb371bbb"}, + {file = "ddtrace-1.15.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c7c8a0e140d28e49cf8cd96cdec8e17232c5525ed5c154729b8afb6cb93a8e2b"}, + {file = "ddtrace-1.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0112d258c3a681a63e5f057b9e3ee8504b60d773d95baf195462d9ff4096caa9"}, + {file = "ddtrace-1.15.2-cp38-cp38-win32.whl", hash = "sha256:6ea7b80eb8019a70c999ef8cfd34fd6078a2ae154007d124d5e642531bf1a9d6"}, + {file = "ddtrace-1.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:282b8c9b46d7a8450325299cf348a0f1d8f9f34d174a0ea402bc1a1df4ad7cf3"}, + {file = "ddtrace-1.15.2-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:53b171404b59c1e030ea614e194d1483fb42437a02ffdd7f4a45175613dd7cb4"}, + {file = "ddtrace-1.15.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9ba06236dd8bd64776b7b734dd9421709670fef090857448e75c97acb30cdce7"}, + {file = "ddtrace-1.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6abe5ba4396c9f7633cab68d0e81c5fd94f7c77b046b3ee969eded068a522d7"}, + {file = "ddtrace-1.15.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:61a1b48f97a07e2f422ec01bb23861716300cebe4afd917ab36bb4db68904da4"}, + {file = "ddtrace-1.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86e186dc66802e2d71b94330c1635fd4c3f881a1bb71747be162a57b7602daaa"}, + {file = "ddtrace-1.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:762b5a0777454299c4ac62177578969ed551c973063f87a8825d9d073e5250ce"}, + {file = "ddtrace-1.15.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:889d359f4382fde41893ba5c00b412cbea8502e1b6bb6c83bf87fa6e63cbfabe"}, + {file = "ddtrace-1.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c4d3c9ce3456181e535b9da42bde7c850dc7224039fd083e95b05010c2ff9748"}, + {file = "ddtrace-1.15.2-cp39-cp39-win32.whl", hash = "sha256:69e47d28327a7afb263c16cc6bf1227e1b2bf1fdb2d559dce913a138a3f36807"}, + {file = "ddtrace-1.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:da780fbfe6dd749ee571a468b8e86f1fd4f51626d35626c2356f8a440efe0dfa"}, + {file = 
"ddtrace-1.15.2.tar.gz", hash = "sha256:e5c1a5965ea8d8260586769102d79522bc7d9758a271252bb58ee05d6c5cd9a8"}, +] + +[package.dependencies] +attrs = {version = ">=20", markers = "python_version > \"2.7\""} +bytecode = [ + {version = ">=0.13.0,<0.14.0", markers = "python_version == \"3.7\""}, + {version = "*", markers = "python_version >= \"3.8\""}, +] +cattrs = {version = "*", markers = "python_version >= \"3.7\""} +ddsketch = ">=2.0.1" +envier = "*" +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +opentelemetry-api = {version = ">=1", markers = "python_version >= \"3.7\""} +protobuf = {version = ">=3", markers = "python_version >= \"3.7\""} +six = ">=1.12.0" +typing-extensions = "*" +xmltodict = ">=0.12" + +[package.extras] +opentracing = ["opentracing (>=2.0.0)"] + [[package]] name = "decorator" version = "5.1.1" @@ -609,6 +782,37 @@ files = [ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + +[[package]] +name = "envier" +version = "0.4.0" +description = "Python application configuration via the environment" +optional = false +python-versions = ">=2.7" +files = [ + {file = "envier-0.4.0-py3-none-any.whl", hash = "sha256:7b91af0f16ea3e56d91ec082f038987e81b441fc19c657a8b8afe0909740a706"}, + {file = "envier-0.4.0.tar.gz", hash = "sha256:e68dcd1ed67d8b6313883e27dff3e701b7fba944d2ed4b7f53d0cc2e12364a82"}, +] + +[package.extras] +mypy = ["mypy"] + [[package]] name = "exceptiongroup" version = "1.1.2" @@ -1613,6 +1817,21 @@ doc = ["nb2plots (>=0.6)", "numpydoc (>=1.1)", "pillow (>=8.2)", "pydata-sphinx- extra = ["lxml (>=4.5)", "pydot (>=1.4.1)", "pygraphviz (>=1.7)"] test = ["codecov (>=2.1)", "pytest (>=6.2)", "pytest-cov (>=2.12)"] +[[package]] +name = "opentelemetry-api" +version = "1.19.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_api-1.19.0-py3-none-any.whl", hash = "sha256:dcd2a0ad34b691964947e1d50f9e8c415c32827a1d87f0459a72deb9afdf5597"}, + {file = "opentelemetry_api-1.19.0.tar.gz", hash = "sha256:db374fb5bea00f3c7aa290f5d94cea50b659e6ea9343384c5f6c2bb5d5e8db65"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<7.0" + [[package]] name = "packaging" version = "23.1" @@ -1708,6 +1927,28 @@ importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "protobuf" +version = "4.23.4" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "protobuf-4.23.4-cp310-abi3-win32.whl", hash = "sha256:5fea3c64d41ea5ecf5697b83e41d09b9589e6f20b677ab3c48e5f242d9b7897b"}, + {file = "protobuf-4.23.4-cp310-abi3-win_amd64.whl", hash = "sha256:7b19b6266d92ca6a2a87effa88ecc4af73ebc5cfde194dc737cf8ef23a9a3b12"}, + {file = 
"protobuf-4.23.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8547bf44fe8cec3c69e3042f5c4fb3e36eb2a7a013bb0a44c018fc1e427aafbd"}, + {file = "protobuf-4.23.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:fee88269a090ada09ca63551bf2f573eb2424035bcf2cb1b121895b01a46594a"}, + {file = "protobuf-4.23.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:effeac51ab79332d44fba74660d40ae79985901ac21bca408f8dc335a81aa597"}, + {file = "protobuf-4.23.4-cp37-cp37m-win32.whl", hash = "sha256:c3e0939433c40796ca4cfc0fac08af50b00eb66a40bbbc5dee711998fb0bbc1e"}, + {file = "protobuf-4.23.4-cp37-cp37m-win_amd64.whl", hash = "sha256:9053df6df8e5a76c84339ee4a9f5a2661ceee4a0dab019e8663c50ba324208b0"}, + {file = "protobuf-4.23.4-cp38-cp38-win32.whl", hash = "sha256:e1c915778d8ced71e26fcf43c0866d7499891bca14c4368448a82edc61fdbc70"}, + {file = "protobuf-4.23.4-cp38-cp38-win_amd64.whl", hash = "sha256:351cc90f7d10839c480aeb9b870a211e322bf05f6ab3f55fcb2f51331f80a7d2"}, + {file = "protobuf-4.23.4-cp39-cp39-win32.whl", hash = "sha256:6dd9b9940e3f17077e820b75851126615ee38643c2c5332aa7a359988820c720"}, + {file = "protobuf-4.23.4-cp39-cp39-win_amd64.whl", hash = "sha256:0a5759f5696895de8cc913f084e27fd4125e8fb0914bb729a17816a33819f474"}, + {file = "protobuf-4.23.4-py3-none-any.whl", hash = "sha256:e9d0be5bf34b275b9f87ba7407796556abeeba635455d036c7351f7c183ef8ff"}, + {file = "protobuf-4.23.4.tar.gz", hash = "sha256:ccd9430c0719dce806b93f89c91de7977304729e55377f872a92465d548329a9"}, +] + [[package]] name = "publication" version = "0.0.3" @@ -2575,7 +2816,7 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, @@ -2671,6 +2912,17 @@ PyYAML = ">=4.2b1,<7.0" radon = ">=4,<6" requests = ">=2.0,<3.0" +[[package]] +name = "xmltodict" +version = "0.13.0" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.4" +files = [ + {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, + {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, +] + [[package]] name = "zipp" version = "3.15.0" @@ -2689,6 +2941,7 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [extras] all = ["aws-xray-sdk", "fastjsonschema", "pydantic"] aws-sdk = ["boto3"] +datadog = [] parser = ["pydantic"] tracer = ["aws-xray-sdk"] validation = ["fastjsonschema"] @@ -2696,4 +2949,4 @@ validation = ["fastjsonschema"] [metadata] lock-version = "2.0" python-versions = "^3.7.4" -content-hash = "e634f248e2f28160fd669018c4be9cc25b639f6a9c75c5c000386a19acff67db" +content-hash = "81deca8932f95b947e84ebf2b68a355a4e07c9d8fe170057dbdf901c7c7f16ed" diff --git a/pyproject.toml b/pyproject.toml index 2ec9d8f81f6..6be8b5d09c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,6 +76,7 @@ ijson = "^3.2.2" typed-ast = { version = "^1.5.5", python = "< 3.8"} hvac = "^1.1.1" aws-requests-auth = "^0.4.3" +datadog-lambda = "^4.77.0" [tool.poetry.extras] parser = ["pydantic"] @@ -84,6 +85,7 @@ tracer = ["aws-xray-sdk"] all = ["pydantic", "aws-xray-sdk", "fastjsonschema"] # allow customers to run code locally 
without emulators (SAM CLI, etc.) aws-sdk = ["boto3"] +datadog=["datadog-lambda"] [tool.poetry.group.dev.dependencies] cfn-lint = "0.79.6" From 68ed6a3e95bfd978a2a36a02b6d937456c831c43 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 9 Aug 2023 18:56:12 +0100 Subject: [PATCH 03/37] Adding default tags method --- .../metrics/provider/base.py | 2 + .../metrics/provider/datadog/datadog.py | 56 ++++++++-- .../metrics/provider/datadog/metrics.py | 35 +++++- .../metrics/test_metrics_datadog.py | 103 +++++++++++++++++- 4 files changed, 180 insertions(+), 16 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 8bd2440658a..a5c8eec69c2 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -181,6 +181,7 @@ def handler(event, context): """ default_dimensions = kwargs.get("default_dimensions") + default_tags = kwargs.get("default_tags") # If handler is None we've been called with parameters # Return a partial function with args filled @@ -191,6 +192,7 @@ def handler(event, context): capture_cold_start_metric=capture_cold_start_metric, raise_on_empty_metrics=raise_on_empty_metrics, default_dimensions=default_dimensions, + default_tags=default_tags, ) @functools.wraps(lambda_handler) diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index 91aa1aa5f28..18f35d6a233 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -17,8 +17,8 @@ # Check if using datadog layer try: from datadog_lambda.metric import lambda_metric # type: ignore -except ImportError: - lambda_metric = None +except ImportError: # pragma: no cover + lambda_metric = None # pragma: no cover DEFAULT_NAMESPACE = "default" @@ -40,7 +40,13 @@ class DatadogProvider(BaseProvider): """ - def __init__(self, metric_set: List | None = None, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = False): + def __init__( + self, + metric_set: List | None = None, + namespace: str = DEFAULT_NAMESPACE, + flush_to_log: bool = False, + default_tags: List | None = None, + ): """ Parameters @@ -53,6 +59,7 @@ def __init__(self, metric_set: List | None = None, namespace: str = DEFAULT_NAME """ self.metric_set = metric_set if metric_set is not None else [] self.namespace: str = namespace + self.default_tags = default_tags or [] # either is true then flush to log self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log @@ -96,12 +103,16 @@ def add_metric( """ if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") + if tags is None: tags = [] + if not timestamp: timestamp = int(time.time()) - for k, w in kwargs.items(): - tags.append(f"{k}:{w}") + + for tag_key, tag_value in kwargs.items(): + tags.append(f"{tag_key}:{tag_value}") + self.metric_set.append({"m": name, "v": value, "e": timestamp, "t": tags}) def serialize_metric_set(self, metrics: List | None = None) -> List: @@ -146,7 +157,7 @@ def serialize_metric_set(self, metrics: List | None = None) -> List: "m": metric_name, "v": single_metric["v"], "e": single_metric["e"], - "t": single_metric["t"], + "t": single_metric["t"] or list(self.default_tags), }, ) @@ -192,8 +203,8 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: # submit through datadog extension if lambda_metric and self.flush_to_log is 
False:
             # use lambda_metric function from datadog package, submit metrics to datadog
-            for metric_item in metrics:
-                lambda_metric(
+            for metric_item in metrics:  # pragma: no cover
+                lambda_metric(  # pragma: no cover
                     metric_name=metric_item["m"],
                     value=metric_item["v"],
                     timestamp=metric_item["e"],
@@ -263,9 +274,38 @@ def handler(event, context):
             Propagate error received
         """
+        default_tags = kwargs.get("default_tags")
+
+        if default_tags:
+            self.set_default_tags(**default_tags)
+
         return super().log_metrics(
             lambda_handler=lambda_handler,
             capture_cold_start_metric=capture_cold_start_metric,
             raise_on_empty_metrics=raise_on_empty_metrics,
             **kwargs,
         )
+
+    def set_default_tags(self, **kwargs) -> None:
+        """Persist tags across Lambda invocations
+
+        Parameters
+        ----------
+        **kwargs: Any
+            tags as key=value
+
+        Example
+        -------
+        **Sets some default tags that will always be present across metrics and invocations**
+
+            from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
+
+            metrics = DatadogMetrics(namespace="ServerlessAirline")
+            metrics.set_default_tags(environment="demo", another="one")
+
+            @metrics.log_metrics()
+            def lambda_handler():
+                return True
+        """
+        for tag_key, tag_value in kwargs.items():
+            self.default_tags.append(f"{tag_key}:{tag_value}")
diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py
index bdd8870898c..bb141323688 100644
--- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py
+++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py
@@ -13,9 +13,7 @@ class DatadogMetrics:
     # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't
     # Result: ProductCreated is created twice as we now have 2 different EMF blobs
     _metrics: List = []
-    _default_tags: Dict[str, Any] = {}
-    _metadata: Dict[str, Any] = {}
-    _default_dimensions: Dict[str, Any] = {}
+    _default_tags: List = []
 
     def __init__(
         self,
@@ -56,16 +54,47 @@ def log_metrics(
         lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None,
         capture_cold_start_metric: bool = False,
         raise_on_empty_metrics: bool = False,
+        default_tags: List | None = None,
     ):
         return self.provider.log_metrics(
             lambda_handler=lambda_handler,
             capture_cold_start_metric=capture_cold_start_metric,
             raise_on_empty_metrics=raise_on_empty_metrics,
+            default_tags=default_tags,
        )
 
+    def set_default_tags(self, **kwargs) -> None:
+        """Persist tags across Lambda invocations
+
+        Parameters
+        ----------
+        **kwargs: Any
+            tags as key=value
+
+        Example
+        -------
+        **Sets some default tags that will always be present across metrics and invocations**
+
+            from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
+
+            metrics = DatadogMetrics(namespace="ServerlessAirline")
+            metrics.set_default_tags(environment="demo", another="one")
+
+            @metrics.log_metrics()
+            def lambda_handler():
+                return True
+        """
+        self.provider.set_default_tags(**kwargs)
+        for tag_key, tag_value in kwargs.items():
+            self.default_tags.append(f"{tag_key}:{tag_value}")
+
     def clear_metrics(self) -> None:
         self.provider.clear_metrics()
 
+    def clear_default_tags(self) -> None:
+        self.provider.default_tags.clear()
+        self.default_tags.clear()
+
     # We now allow customers to bring their own instance
     # of the AmazonCloudWatchEMFProvider provider
     # So we need to define getter/setter for namespace and service properties
diff 
--git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py index 5b43cd8d0ba..192e52decf8 100644 --- a/tests/functional/metrics/test_metrics_datadog.py +++ b/tests/functional/metrics/test_metrics_datadog.py @@ -29,8 +29,7 @@ def lambda_handler(event, context): def test_datadog_write_to_log_with_env_variable(capsys, namespace): os.environ["DD_FLUSH_TO_LOG"] = "True" - dd_provider = DatadogProvider(namespace=namespace) - metrics = DatadogMetrics(provider=dd_provider) + metrics = DatadogMetrics(namespace=namespace) metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) metrics.flush_metrics() logs = capture_metrics_output(capsys) @@ -107,7 +106,7 @@ def test_datadog_kwargs(capsys, namespace): assert "float:45.6" in tag_list -def test_log_metrics_clear_metrics_after_invocation(metric, service, namespace): +def test_metrics_clear_metrics_after_invocation(metric, service, namespace): # GIVEN Metrics is initialized my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) my_metrics.add_metric(**metric) @@ -123,7 +122,7 @@ def lambda_handler(evt, context): assert my_metrics.metric_set == [] -def test_log_metrics_decorator_no_metrics_warning(dimensions, namespace, service): +def test_metrics_decorator_with_metrics_warning(dimensions, namespace, service): # GIVEN Metrics is initialized my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) @@ -143,7 +142,7 @@ def lambda_handler(evt, context): ) -def test_log_metrics_with_default_namespace(metric, capsys, namespace): +def test_metrics_with_default_namespace(capsys, namespace): # GIVEN Metrics is initialized dd_provider = DatadogProvider(flush_to_log=True) metrics = DatadogMetrics(provider=dd_provider) @@ -157,3 +156,97 @@ def lambda_handler(event, context): lambda_handler({}, LambdaContext("example_fn2")) logs = capsys.readouterr().out.strip() assert namespace not in logs + + +def test_serialize_metrics(metric, namespace): + # GIVEN Metrics is initialized + my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + my_metrics.add_metric(**metric) + + my_metrics.serialize_metric_set() + + # THEN metric set should be empty after function has been run + assert my_metrics.metric_set[0]["m"] == "single_metric" + + +def test_clear_metrics(metric, namespace): + # GIVEN Metrics is initialized + my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + my_metrics.add_metric(**metric) + my_metrics.clear_metrics() + + # THEN metric set should be empty after function has been run + assert my_metrics.metric_set == [] + + +def test_get_namespace_property(namespace): + # GIVEN Metrics is initialized + my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + + assert my_metrics.namespace == namespace + + +def test_set_namespace_property(namespace): + # GIVEN Metrics is initialized + my_metrics = DatadogMetrics() + my_metrics.namespace = namespace + + assert my_metrics.namespace == namespace + + +def test_persist_default_tags(capsys, namespace): + # GIVEN Metrics is initialized and we persist a set of default dimensions + my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + my_metrics.set_default_tags(environment="test", log_group="/lambda/test") + + # WHEN we utilize log_metrics to serialize + # and flush metrics and clear all metrics and dimensions from memory + # at the end of a function execution + @my_metrics.log_metrics + def lambda_handler(evt, ctx): + my_metrics.add_metric(name="item_sold", value=1) + + 
lambda_handler({}, {}) + first_invocation = capsys.readouterr().out.strip() + + lambda_handler({}, {}) + second_invocation = capsys.readouterr().out.strip() + + # THEN we should have default dimensions in both outputs + assert "environment" in first_invocation + assert "environment" in second_invocation + + +def test_log_metrics_with_default_tags(capsys, namespace): + # GIVEN Metrics is initialized + my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + default_tags = {"environment": "test", "log_group": "/lambda/test"} + + # WHEN we utilize log_metrics with default dimensions to serialize + # and flush metrics and clear all metrics and dimensions from memory + # at the end of a function execution + @my_metrics.log_metrics(default_tags=default_tags) + def lambda_handler(evt, ctx): + my_metrics.add_metric(name="item_sold", value=1) + + lambda_handler({}, {}) + first_invocation = capsys.readouterr().out.strip() + + lambda_handler({}, {}) + second_invocation = capsys.readouterr().out.strip() + + # THEN we should have default dimensions in both outputs + assert "environment" in first_invocation + assert "environment" in second_invocation + + +def test_clear_default_tags(namespace): + # GIVEN Metrics is initialized and we persist a set of default dimensions + my_metrics = DatadogMetrics(namespace=namespace) + my_metrics.set_default_tags(environment="test", log_group="/lambda/test") + + # WHEN they are removed via clear_default_dimensions method + my_metrics.clear_default_tags() + + # THEN there should be no default dimensions + assert not my_metrics.default_tags From 7790986b3ca1928738728e22aebda8da1f60d255 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 9 Aug 2023 19:13:35 +0100 Subject: [PATCH 04/37] Cleaning tests + adding specific comments --- .../metrics/test_metrics_datadog.py | 182 +++++++++--------- 1 file changed, 96 insertions(+), 86 deletions(-) diff --git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py index 192e52decf8..693e8d5c482 100644 --- a/tests/functional/metrics/test_metrics_datadog.py +++ b/tests/functional/metrics/test_metrics_datadog.py @@ -11,104 +11,88 @@ from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider -def test_datadog_coldstart(capsys, namespace): +def test_datadog_coldstart(capsys): reset_cold_start_flag() - dd_provider = DatadogProvider(namespace=namespace, flush_to_log=True) + + # GIVEN DatadogMetrics is initialized + dd_provider = DatadogProvider(flush_to_log=True) metrics = DatadogMetrics(provider=dd_provider) LambdaContext = namedtuple("LambdaContext", "function_name") - @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) + # WHEN log_metrics is used with capture_cold_start_metric + @metrics.log_metrics(capture_cold_start_metric=True) def lambda_handler(event, context): metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) lambda_handler({}, LambdaContext("example_fn2")) logs = capsys.readouterr().out.strip() + + # THEN ColdStart metric and function_name and service dimension should be logged assert "ColdStart" in logs + assert "example_fn2" in logs -def test_datadog_write_to_log_with_env_variable(capsys, namespace): +def test_datadog_write_to_log_with_env_variable(capsys): + # GIVEN DD_FLUSH_TO_LOG env is configured os.environ["DD_FLUSH_TO_LOG"] = "True" - metrics = DatadogMetrics(namespace=namespace) + metrics = DatadogMetrics() + + # WHEN we add a metric 
metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) metrics.flush_metrics() logs = capture_metrics_output(capsys) + + # THEN metrics is flushed to log logs["e"] = "" - assert logs == json.loads('{"m":"test_namespace.item_sold","v":1,"e":"","t":["product:latte","order:online"]}') + assert logs == json.loads('{"m":"item_sold","v":1,"e":"","t":["product:latte","order:online"]}') -def test_datadog_with_invalid_value(capsys, namespace): - dd_provider = DatadogProvider(namespace=namespace) - metrics = DatadogMetrics(provider=dd_provider) +def test_datadog_with_invalid_value(): + # GIVEN DatadogMetrics is initialized + metrics = DatadogMetrics() + # WHEN we pass an incorrect metric value (non-numeric) + # WHEN we attempt to serialize a valid Datadog metric + # THEN it should fail validation and raise MetricValueError with pytest.raises(MetricValueError, match=".*is not a valid number"): metrics.add_metric(name="item_sold", value="a", tags=["product:latte", "order:online"]) -def test_datadog_with_namespace(capsys, namespace): - metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_datadog_raise_on_empty(): + # GIVEN DatadogMetrics is initialized + metrics = DatadogMetrics() LambdaContext = namedtuple("LambdaContext", "function_name") - @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) - def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) - - lambda_handler({}, LambdaContext("example_fn")) - logs = capsys.readouterr().out.strip() - assert namespace in logs - - -def test_datadog_raise_on_empty(namespace): - dd_provider = DatadogProvider(namespace=namespace, flush_to_log=True) - metrics = DatadogMetrics(provider=dd_provider) - - LambdaContext = namedtuple("LambdaContext", "function_name") - - @metrics.log_metrics(capture_cold_start_metric=False, raise_on_empty_metrics=True) + # WHEN we set raise_on_empty_metrics to True + @metrics.log_metrics(raise_on_empty_metrics=True) def lambda_handler(event, context): pass + # THEN it should fail with no metric serialized with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): lambda_handler({}, LambdaContext("example_fn")) -def test_datadog_args(capsys, namespace): - dd_provider = DatadogProvider(namespace=namespace, flush_to_log=True) - metrics = DatadogMetrics(provider=dd_provider) +def test_datadog_tags_using_kwargs(capsys): + # GIVEN DatadogMetrics is initialized + metrics = DatadogMetrics() + + # WHEN we add tags using kwargs metrics.add_metric("order_valve", 12.45, sales="sam") metrics.flush_metrics() logs = capsys.readouterr().out.strip() log_dict = json.loads(logs) tag_list = log_dict.get("t") - assert "sales:sam" in tag_list - -def test_datadog_kwargs(capsys, namespace): - dd_provider = DatadogProvider(namespace=namespace, flush_to_log=True) - metrics = DatadogMetrics(provider=dd_provider) - metrics.add_metric( - name="order_valve", - value=12.45, - tags=["test:kwargs"], - str="str", - int=123, - float=45.6, - dict={"type": "termination identified"}, - ) - metrics.flush_metrics() - logs = capsys.readouterr().out.strip() - log_dict = json.loads(logs) - tag_list = log_dict.get("t") - assert "test:kwargs" in tag_list - assert "str:str" in tag_list - assert "int:123" in tag_list - assert "float:45.6" in tag_list + # THEN tags must be present + assert "sales:sam" in tag_list -def test_metrics_clear_metrics_after_invocation(metric, service, namespace): - # GIVEN Metrics is 
initialized - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_metrics_clear_metrics_after_invocation(metric): + # GIVEN DatadogMetrics is initialized + my_metrics = DatadogMetrics(flush_to_log=True) my_metrics.add_metric(**metric) # WHEN log_metrics is used to flush metrics from memory @@ -122,9 +106,9 @@ def lambda_handler(evt, context): assert my_metrics.metric_set == [] -def test_metrics_decorator_with_metrics_warning(dimensions, namespace, service): - # GIVEN Metrics is initialized - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_metrics_decorator_with_metrics_warning(): + # GIVEN DatadogMetrics is initialized + my_metrics = DatadogMetrics(flush_to_log=True) # WHEN using the log_metrics decorator and no metrics have been added @my_metrics.log_metrics @@ -143,35 +127,56 @@ def lambda_handler(evt, context): def test_metrics_with_default_namespace(capsys, namespace): - # GIVEN Metrics is initialized - dd_provider = DatadogProvider(flush_to_log=True) - metrics = DatadogMetrics(provider=dd_provider) + # GIVEN DatadogMetrics is initialized with default namespace + metrics = DatadogMetrics(flush_to_log=True) LambdaContext = namedtuple("LambdaContext", "function_name") - @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) + # WHEN we add metrics + @metrics.log_metrics def lambda_handler(event, context): metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) lambda_handler({}, LambdaContext("example_fn2")) logs = capsys.readouterr().out.strip() + + # THEN default namespace must be assumed assert namespace not in logs -def test_serialize_metrics(metric, namespace): - # GIVEN Metrics is initialized - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_datadog_with_non_default_namespace(capsys, namespace): + # GIVEN DatadogMetrics is initialized with a non-default namespace + metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + # WHEN log_metrics is used + @metrics.log_metrics + def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + + lambda_handler({}, LambdaContext("example_fn")) + logs = capsys.readouterr().out.strip() + + # THEN namespace must be present in logs + assert namespace in logs + + +def test_serialize_metrics(metric): + # GIVEN DatadogMetrics is initialized + my_metrics = DatadogMetrics(flush_to_log=True) my_metrics.add_metric(**metric) + # WHEN we serialize metrics my_metrics.serialize_metric_set() # THEN metric set should be empty after function has been run assert my_metrics.metric_set[0]["m"] == "single_metric" -def test_clear_metrics(metric, namespace): - # GIVEN Metrics is initialized - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_clear_metrics(metric): + # GIVEN DatadogMetrics is initialized + my_metrics = DatadogMetrics(flush_to_log=True) my_metrics.add_metric(**metric) my_metrics.clear_metrics() @@ -180,27 +185,32 @@ def test_clear_metrics(metric, namespace): def test_get_namespace_property(namespace): - # GIVEN Metrics is initialized - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + # GIVEN DatadogMetrics is initialized + my_metrics = DatadogMetrics(namespace=namespace) + # WHEN we try to access the namespace property + # THEN namespace property must be present assert my_metrics.namespace == namespace def 
test_set_namespace_property(namespace): - # GIVEN Metrics is initialized + # GIVEN DatadogMetrics is initialized my_metrics = DatadogMetrics() + + # WHEN we set the namespace property after initialization my_metrics.namespace = namespace + # THEN namespace property must be present assert my_metrics.namespace == namespace -def test_persist_default_tags(capsys, namespace): - # GIVEN Metrics is initialized and we persist a set of default dimensions - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_persist_default_tags(capsys): + # GIVEN DatadogMetrics is initialized and we persist a set of default tags + my_metrics = DatadogMetrics(flush_to_log=True) my_metrics.set_default_tags(environment="test", log_group="/lambda/test") # WHEN we utilize log_metrics to serialize - # and flush metrics and clear all metrics and dimensions from memory + # and flush metrics and clear all metrics and tags from memory # at the end of a function execution @my_metrics.log_metrics def lambda_handler(evt, ctx): @@ -212,18 +222,18 @@ def lambda_handler(evt, ctx): lambda_handler({}, {}) second_invocation = capsys.readouterr().out.strip() - # THEN we should have default dimensions in both outputs + # THEN we should have default tags in both outputs assert "environment" in first_invocation assert "environment" in second_invocation -def test_log_metrics_with_default_tags(capsys, namespace): - # GIVEN Metrics is initialized - my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) +def test_log_metrics_with_default_tags(capsys): + # GIVEN DatadogMetrics is initialized and we persist a set of default tags + my_metrics = DatadogMetrics(flush_to_log=True) default_tags = {"environment": "test", "log_group": "/lambda/test"} # WHEN we utilize log_metrics with default dimensions to serialize - # and flush metrics and clear all metrics and dimensions from memory + # and flush metrics and clear all metrics and tags from memory # at the end of a function execution @my_metrics.log_metrics(default_tags=default_tags) def lambda_handler(evt, ctx): @@ -235,18 +245,18 @@ def lambda_handler(evt, ctx): lambda_handler({}, {}) second_invocation = capsys.readouterr().out.strip() - # THEN we should have default dimensions in both outputs + # THEN we should have default tags in both outputs assert "environment" in first_invocation assert "environment" in second_invocation -def test_clear_default_tags(namespace): - # GIVEN Metrics is initialized and we persist a set of default dimensions - my_metrics = DatadogMetrics(namespace=namespace) +def test_clear_default_tags(): + # GIVEN DatadogMetrics is initialized and we persist a set of default tags + my_metrics = DatadogMetrics() my_metrics.set_default_tags(environment="test", log_group="/lambda/test") - # WHEN they are removed via clear_default_dimensions method + # WHEN they are removed via clear_default_tags method my_metrics.clear_default_tags() - # THEN there should be no default dimensions + # THEN there should be no default tags assert not my_metrics.default_tags From 4acacb7f39fbd6732db197cff29df5c5d064913e Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 9 Aug 2023 20:55:36 +0100 Subject: [PATCH 05/37] Fix small things + improving docstring --- aws_lambda_powertools/metrics/metrics.py | 2 + .../metrics/provider/datadog/datadog.py | 80 +++++++------------ .../metrics/provider/datadog/metrics.py | 72 +++++++++++------ aws_lambda_powertools/shared/constants.py | 2 + .../metrics/test_metrics_datadog.py | 34 +++++++- 5 files changed, 110
insertions(+), 80 deletions(-) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 900e0da7dd7..cb970fcfdc0 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -51,6 +51,8 @@ def lambda_handler(): service name to be used as metric dimension, by default "service_undefined" namespace : str, optional Namespace for metrics + provider: AmazonCloudWatchEMFProvider, optional + Pre-configured AmazonCloudWatchEMFProvider instance Raises ------ diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index 18f35d6a233..28bddba33a8 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -10,6 +10,8 @@ from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError from aws_lambda_powertools.metrics.provider import BaseProvider +from aws_lambda_powertools.shared import constants +from aws_lambda_powertools.shared.functions import resolve_env_var_choice from aws_lambda_powertools.utilities.typing import LambdaContext logger = logging.getLogger(__name__) @@ -25,43 +27,36 @@ class DatadogProvider(BaseProvider): """ - Class for datadog provider. This Class should only be used inside DatadogMetrics - all datadog metric data will be stored as - { - "m": metric_name, - "v": value, - "e": timestamp - "t": List["tag:value","tag2:value2"] - } - see https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 - - Examples - -------- + DatadogProvider creates metrics asynchronously via Datadog extension or exporter. + **Use `aws_lambda_powertools.DatadogMetrics` to create and flush metrics to Datadog.** + + Environment variables + --------------------- + POWERTOOLS_METRICS_NAMESPACE : str + metric namespace to be set for all metrics + + Raises + ------ + MetricValueError + When metric value isn't a number + SchemaValidationError + When metric object fails Datadog schema validation """ def __init__( self, metric_set: List | None = None, - namespace: str = DEFAULT_NAMESPACE, - flush_to_log: bool = False, + namespace: str | None = None, + flush_to_log: bool | None = None, default_tags: List | None = None, ): - """ - - Parameters - ---------- - namespace: str - For datadog, namespace will be appended in front of the metrics name in metrics exported.
- (namespace.metrics_name) - flush_to_log: bool - Flush datadog metrics to log (collect with log forwarder) rather than using datadog extension - """ self.metric_set = metric_set if metric_set is not None else [] - self.namespace: str = namespace + self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV)) + if self.namespace is None: + self.namespace = DEFAULT_NAMESPACE self.default_tags = default_tags or [] - # either is true then flush to log - self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log + self.flush_to_log = resolve_env_var_choice(choice=flush_to_log, env=os.getenv(constants.DATADOG_FLUSH_TO_LOG)) # adding name,value,timestamp,tags def add_metric( @@ -123,7 +118,7 @@ def serialize_metric_set(self, metrics: List | None = None) -> List: **Serialize metrics into Datadog format** metrics = DatadogMetric() - # ...add metrics, dimensions, namespace + # ...add metrics, tags, namespace ret = metrics.serialize_metric_set() Returns @@ -174,23 +169,6 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: raise_on_empty_metrics : bool, optional raise exception if no metrics are emitted, by default False """ - """ - - Parameters - ---------- - metrics: List[Dict] - [{ - "m": metric_name, - "v": value, - "e": timestamp - "t": List["tag:value","tag2:value2"] - }] - - Raises - ------- - SchemaValidationError - When metric object fails EMF schema validation - """ if not raise_on_empty_metrics and len(self.metric_set) == 0: warnings.warn( "No application metrics to publish. The cold-start metric may be published if enabled. " @@ -248,9 +226,9 @@ def log_metrics( ------- **Lambda function using tracer and metrics decorators** - from aws_lambda_powertools import Metrics, Tracer + from aws_lambda_powertools import DatadogMetrics, Tracer - metrics = Metrics(service="payment") + metrics = DatadogMetrics(namespace="powertools") tracer = Tracer(service="payment") @tracer.capture_lambda_handler @@ -287,12 +265,12 @@ def handler(event, context): ) def set_default_tags(self, **kwargs) -> None: - """Persist dimensions across Lambda invocations + """Persist tags across Lambda invocations Parameters ---------- - dimensions : Dict[str, Any], optional - metric dimensions as key=value + tags : **kwargs + tags as key=value Example ------- @@ -301,7 +279,7 @@ def set_default_tags(self, **kwargs) -> None: from aws_lambda_powertools import Metrics metrics = Metrics(namespace="ServerlessAirline", service="payment") - metrics.set_default_dimensions(environment="demo", another="one") + metrics.set_default_tags(environment="demo", another="one") @metrics.log_metrics() def lambda_handler(): diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py index bb141323688..2c385bdd2d4 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py +++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py @@ -3,10 +3,50 @@ from typing import Any, Callable, Dict, List, Optional -from aws_lambda_powertools.metrics.provider.datadog.datadog import DEFAULT_NAMESPACE, DatadogProvider +from aws_lambda_powertools.metrics.provider.datadog.datadog import DatadogProvider class DatadogMetrics: + """ + DatadogProvider creates metrics asynchronously via Datadog extension or exporter. 
+ + **Use `aws_lambda_powertools.DatadogMetrics` to create and flush metrics to Datadog.** + + Example + ------- + **Creates a few metrics and publish at the end of a function execution** + + from aws_lambda_powertools import DatadogMetrics + + metrics = DatadogMetrics(namespace="ServerlessAirline") + + @metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(): + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + return True + + Environment variables + --------------------- + POWERTOOLS_METRICS_NAMESPACE : str + metric namespace + + Parameters + ---------- + flush_to_log : bool, optional + Used when using export instead of extension + namespace : str, optional + Namespace for metrics + provider: DatadogProvider, optional + Pre-configured DatadogProvider instance + + Raises + ------ + MetricValueError + When metric value isn't a number + SchemaValidationError + When metric object fails EMF schema validation + """ + # NOTE: We use class attrs to share metrics data across instances # this allows customers to initialize Metrics() throughout their code base (and middlewares) # and not get caught by accident with metrics data loss, or data deduplication # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't # Result: ProductCreated is created twice as we now have 2 different EMF blobs _metrics: List = [] _default_tags: List = [] def __init__( self, - namespace: str = DEFAULT_NAMESPACE, - flush_to_log: bool = False, + namespace: str | None = None, + flush_to_log: bool | None = None, provider: DatadogProvider | None = None, ): self.metric_set = self._metrics @@ -64,26 +104,6 @@ def log_metrics( default_tags=default_tags, ) def set_default_tags(self, **kwargs) -> None: - """Persist dimensions across Lambda invocations - - Parameters - ---------- - dimensions : Dict[str, Any], optional - metric dimensions as key=value - - Example - ------- - **Sets some default dimensions that will always be present across metrics and invocations** - - from aws_lambda_powertools import Metrics - - metrics = Metrics(namespace="ServerlessAirline", service="payment") - metrics.set_default_dimensions(environment="demo", another="one") - - @metrics.log_metrics() - def lambda_handler(): - return True - """ self.provider.set_default_tags(**kwargs) for tag_key, tag_value in kwargs.items(): self.default_tags.append(f"{tag_key}:{tag_value}") @@ -96,9 +116,9 @@ def clear_default_tags(self) -> None: self.default_tags.clear() # We now allow customers to bring their own instance - # of the AmazonCloudWatchEMFProvider provider - # So we need to define getter/setter for namespace and service properties - # To access these attributes on the provider instance. + # of the DatadogProvider + # So we need to define getter/setter for namespace property + # To access this attribute on the provider instance.
@property def namespace(self): return self.provider.namespace diff --git a/aws_lambda_powertools/shared/constants.py b/aws_lambda_powertools/shared/constants.py index 0cde7582976..20a7fbf47d2 100644 --- a/aws_lambda_powertools/shared/constants.py +++ b/aws_lambda_powertools/shared/constants.py @@ -10,6 +10,8 @@ METRICS_NAMESPACE_ENV: str = "POWERTOOLS_METRICS_NAMESPACE" +DATADOG_FLUSH_TO_LOG: str = "DD_FLUSH_TO_LOG" + SERVICE_NAME_ENV: str = "POWERTOOLS_SERVICE_NAME" XRAY_TRACE_ID_ENV: str = "_X_AMZN_TRACE_ID" LAMBDA_TASK_ROOT_ENV: str = "LAMBDA_TASK_ROOT" diff --git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py index 693e8d5c482..b1b2ace9bc7 100644 --- a/tests/functional/metrics/test_metrics_datadog.py +++ b/tests/functional/metrics/test_metrics_datadog.py @@ -1,5 +1,4 @@ import json -import os import warnings from collections import namedtuple @@ -33,9 +32,9 @@ def lambda_handler(event, context): assert "example_fn2" in logs -def test_datadog_write_to_log_with_env_variable(capsys): +def test_datadog_write_to_log_with_env_variable(capsys, monkeypatch): # GIVEN DD_FLUSH_TO_LOG env is configured - os.environ["DD_FLUSH_TO_LOG"] = "True" + monkeypatch.setenv("DD_FLUSH_TO_LOG", "True") metrics = DatadogMetrics() # WHEN we add a metric metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) metrics.flush_metrics() logs = capture_metrics_output(capsys) @@ -260,3 +259,32 @@ def test_clear_default_tags(): # THEN there should be no default tags assert not my_metrics.default_tags + + +def test_namespace_var_precedence(monkeypatch, namespace): + # GIVEN we use POWERTOOLS_METRICS_NAMESPACE + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", "a_namespace") + my_metrics = DatadogMetrics(namespace=namespace, flush_to_log=True) + + # WHEN creating a metric and explicitly setting a namespace + my_metrics.add_metric(name="item_sold", value=1) + + output = my_metrics.serialize_metric_set() + + # THEN namespace should match the explicitly passed variable and not the env var + assert output[0]["m"] == f"{namespace}.item_sold" + + +def test_namespace_env_var(monkeypatch): + # GIVEN POWERTOOLS_METRICS_NAMESPACE is set + env_namespace = "a_namespace" + monkeypatch.setenv("POWERTOOLS_METRICS_NAMESPACE", env_namespace) + my_metrics = DatadogMetrics(flush_to_log=True) + + # WHEN creating a metric without an explicit namespace + my_metrics.add_metric(name="item_sold", value=1) + + output = my_metrics.serialize_metric_set() + + # THEN namespace should fall back to the env var + assert output[0]["m"] == f"{env_namespace}.item_sold" From f60a3ba2e08d1757e276d28e5d5ecfcfe320ceee Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 9 Aug 2023 22:11:54 +0100 Subject: [PATCH 06/37] Fixing minor bugs --- aws_lambda_powertools/metrics/provider/base.py | 11 +++++++---- .../metrics/provider/datadog/datadog.py | 12 +++++++----- .../metrics/provider/datadog/metrics.py | 4 +++- tests/functional/metrics/test_metrics_datadog.py | 2 +- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index a5c8eec69c2..702b4b3d2ba 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -179,9 +179,13 @@ def handler(event, context): e Propagate error received """ + extra_args = {} - default_dimensions = kwargs.get("default_dimensions") - default_tags = kwargs.get("default_tags") + if kwargs.get("default_dimensions"): + extra_args.update({"default_dimensions": kwargs.get("default_dimensions")}) + + if
kwargs.get("default_tags"): + extra_args.update({"default_tags": kwargs.get("default_tags")}) # If handler is None we've been called with parameters # Return a partial function with args filled @@ -191,8 +195,7 @@ def handler(event, context): self.log_metrics, capture_cold_start_metric=capture_cold_start_metric, raise_on_empty_metrics=raise_on_empty_metrics, - default_dimensions=default_dimensions, - default_tags=default_tags, + **extra_args, ) @functools.wraps(lambda_handler) diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index 28bddba33a8..3310743eebb 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -179,7 +179,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: else: metrics = self.serialize_metric_set() # submit through datadog extension - if lambda_metric and self.flush_to_log is False: + if lambda_metric and not self.flush_to_log: # use lambda_metric function from datadog package, submit metrics to datadog for metric_item in metrics: # pragma: no cover lambda_metric( # pragma: no cover @@ -252,10 +252,10 @@ def handler(event, context): Propagate error received """ - default_dimensions = kwargs.get("default_tags") + default_tags = kwargs.get("default_tags") - if default_dimensions: - self.set_default_tags(**default_dimensions) + if default_tags: + self.set_default_tags(**default_tags) return super().log_metrics( lambda_handler=lambda_handler, @@ -286,4 +286,6 @@ def lambda_handler(): return True """ for tag_key, tag_value in kwargs.items(): - self.default_tags.append(f"{tag_key}:{tag_value}") + tag = f"{tag_key}:{tag_value}" + if tag not in self.default_tags: + self.default_tags.append(tag) diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py index 2c385bdd2d4..5ce6d1b94ca 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py +++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py @@ -106,7 +106,9 @@ def log_metrics( def set_default_tags(self, **kwargs) -> None: self.provider.set_default_tags(**kwargs) for tag_key, tag_value in kwargs.items(): - self.default_tags.append(f"{tag_key}:{tag_value}") + tag = f"{tag_key}:{tag_value}" + if tag not in self.default_tags: + self.default_tags.append(tag) def clear_metrics(self) -> None: self.provider.clear_metrics() diff --git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py index b1b2ace9bc7..7f99d2deb5c 100644 --- a/tests/functional/metrics/test_metrics_datadog.py +++ b/tests/functional/metrics/test_metrics_datadog.py @@ -76,7 +76,7 @@ def lambda_handler(event, context): def test_datadog_tags_using_kwargs(capsys): # GIVEN DatadogMetrics is initialized - metrics = DatadogMetrics() + metrics = DatadogMetrics(flush_to_log=True) # WHEN we add tags using kwargs metrics.add_metric("order_valve", 12.45, sales="sam") From b681d93ed2097f995ff8514f14ff05a192bbbe62 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 10 Aug 2023 15:48:18 +0100 Subject: [PATCH 07/37] Addressing feedback --- aws_lambda_powertools/metrics/functions.py | 8 ++++++- .../metrics/provider/datadog/datadog.py | 23 ++++++------------- .../metrics/provider/datadog/metrics.py | 20 +++++++--------- tests/functional/metrics/conftest.py | 5 ++++ .../metrics/test_metrics_datadog.py | 10 ++++---- 5 files changed, 32 insertions(+), 34 
deletions(-) diff --git a/aws_lambda_powertools/metrics/functions.py b/aws_lambda_powertools/metrics/functions.py index d951c0749a3..ef57ee0fe6b 100644 --- a/aws_lambda_powertools/metrics/functions.py +++ b/aws_lambda_powertools/metrics/functions.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import List +from typing import Any, Dict, List from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import ( MetricResolutionError, @@ -70,3 +70,9 @@ def extract_cloudwatch_metric_unit_value(metric_units: List, metric_valid_option unit = unit.value return unit + + +def serialize_datadog_tags(metric_tags: Dict[str, Any], default_tags: Dict[str, Any]) -> List[str]: + tags = metric_tags or default_tags + + return [f"{tag_key}:{tag_value}" for tag_key, tag_value in tags.items()] diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index 3310743eebb..98d5ea835c8 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -9,6 +9,7 @@ from typing import Any, Callable, Dict, List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError +from aws_lambda_powertools.metrics.functions import serialize_datadog_tags from aws_lambda_powertools.metrics.provider import BaseProvider from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice @@ -49,13 +50,13 @@ def __init__( metric_set: List | None = None, namespace: str | None = None, flush_to_log: bool | None = None, - default_tags: List | None = None, + default_tags: Dict | None = None, ): self.metric_set = metric_set if metric_set is not None else [] self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV)) if self.namespace is None: self.namespace = DEFAULT_NAMESPACE - self.default_tags = default_tags or [] + self.default_tags = default_tags or {} self.flush_to_log = resolve_env_var_choice(choice=flush_to_log, env=os.getenv(constants.DATADOG_FLUSH_TO_LOG)) # adding name,value,timestamp,tags @@ -64,8 +65,7 @@ def add_metric( name: str, value: float, timestamp: int | None = None, - tags: List | None = None, - **kwargs: Any, + **tags, ) -> None: """ The add_metrics function that will be used by metrics class. 
@@ -99,15 +99,9 @@ def add_metric( if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") - if tags is None: - tags = [] - if not timestamp: timestamp = int(time.time()) - for tag_key, tag_value in kwargs.items(): - tags.append(f"{tag_key}:{tag_value}") - self.metric_set.append({"m": name, "v": value, "e": timestamp, "t": tags}) def serialize_metric_set(self, metrics: List | None = None) -> List: @@ -152,7 +146,7 @@ def serialize_metric_set(self, metrics: List | None = None) -> List: "m": metric_name, "v": single_metric["v"], "e": single_metric["e"], - "t": single_metric["t"] or list(self.default_tags), + "t": serialize_datadog_tags(metric_tags=single_metric["t"], default_tags=self.default_tags), }, ) @@ -264,7 +258,7 @@ def handler(event, context): **kwargs, ) - def set_default_tags(self, **kwargs) -> None: + def set_default_tags(self, **tags) -> None: """Persist tags across Lambda invocations Parameters @@ -285,7 +279,4 @@ def set_default_tags(self, **kwargs) -> None: def lambda_handler(): return True """ - for tag_key, tag_value in kwargs.items(): - tag = f"{tag_key}:{tag_value}" - if tag not in self.default_tags: - self.default_tags.append(tag) + self.default_tags.update(**tags) diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py index 5ce6d1b94ca..0e8ca1e8aac 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py +++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py @@ -22,7 +22,7 @@ class DatadogMetrics: @metrics.log_metrics(capture_cold_start_metric=True) def lambda_handler(): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.add_metric(name="item_sold", value=1, product="latte", order="online") return True Environment variables @@ -44,7 +44,7 @@ def lambda_handler(): MetricValueError When metric value isn't a number SchemaValidationError - When metric object fails EMF schema validation + When metric object fails Datadog schema validation """ # NOTE: We use class attrs to share metrics data across instances @@ -53,7 +53,7 @@ def lambda_handler(): # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't # Result: ProductCreated is created twice as we now have 2 different EMF blobs _metrics: List = [] - _default_tags: List = [] + _default_tags: Dict = {} def __init__( self, @@ -78,10 +78,9 @@ def add_metric( name: str, value: float, timestamp: int | None = None, - tags: List | None = None, - **kwargs: Any, + **tags: Any, ) -> None: - self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags, **kwargs) + self.provider.add_metric(name=name, value=value, timestamp=timestamp, **tags) def serialize_metric_set(self, metrics: List | None = None) -> List: return self.provider.serialize_metric_set(metrics=metrics) @@ -103,12 +102,9 @@ def log_metrics( default_tags=default_tags, ) - def set_default_tags(self, **kwargs) -> None: - self.provider.set_default_tags(**kwargs) - for tag_key, tag_value in kwargs.items(): - tag = f"{tag_key}:{tag_value}" - if tag not in self.default_tags: - self.default_tags.append(tag) + def set_default_tags(self, **tags) -> None: + self.provider.set_default_tags(**tags) + self.default_tags.update(**tags) def clear_metrics(self) -> None: self.provider.clear_metrics() diff --git a/tests/functional/metrics/conftest.py b/tests/functional/metrics/conftest.py index cb0e083ca1f..2de3a0087c2 100644 --- 
a/tests/functional/metrics/conftest.py +++ b/tests/functional/metrics/conftest.py @@ -29,6 +29,11 @@ def metric() -> Dict[str, str]: return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1} +@pytest.fixture +def metric_datadog() -> Dict[str, str]: + return {"name": "single_metric", "value": 1, "timestamp": 1691678198, "powertools": "datadog"} + + @pytest.fixture def metrics() -> List[Dict[str, str]]: return [ diff --git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py index 7f99d2deb5c..dd8f0b02112 100644 --- a/tests/functional/metrics/test_metrics_datadog.py +++ b/tests/functional/metrics/test_metrics_datadog.py @@ -38,7 +38,7 @@ def test_datadog_write_to_log_with_env_variable(capsys, monkeypatch): metrics = DatadogMetrics() # WHEN we add a metric - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.add_metric(name="item_sold", value=1, product="latte", order="online") metrics.flush_metrics() logs = capture_metrics_output(capsys) @@ -89,10 +89,10 @@ def test_datadog_tags_using_kwargs(capsys): assert "sales:sam" in tag_list -def test_metrics_clear_metrics_after_invocation(metric): +def test_metrics_clear_metrics_after_invocation(metric_datadog): # GIVEN DatadogMetrics is initialized my_metrics = DatadogMetrics(flush_to_log=True) - my_metrics.add_metric(**metric) + my_metrics.add_metric(**metric_datadog) # WHEN log_metrics is used to flush metrics from memory @my_metrics.log_metrics @@ -161,10 +161,10 @@ def lambda_handler(event, context): assert namespace in logs -def test_serialize_metrics(metric): +def test_serialize_metrics(metric_datadog): # GIVEN DatadogMetrics is initialized my_metrics = DatadogMetrics(flush_to_log=True) - my_metrics.add_metric(**metric) + my_metrics.add_metric(**metric_datadog) # WHEN we serialize metrics my_metrics.serialize_metric_set() From 64e0bbbb0bfd369f54bf36284f14b805839687d5 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 10 Aug 2023 23:04:40 +0100 Subject: [PATCH 08/37] Documentation: adding creating metrics --- docs/core/metrics.md | 2 +- docs/core/metrics_datadog.md | 398 ++++++++++++++++++ examples/metrics_datadog/sam/template.yaml | 31 ++ .../src/add_metrics_with_provider.py | 10 + .../src/add_metrics_without_provider.py | 9 + mkdocs.yml | 4 +- 6 files changed, 452 insertions(+), 2 deletions(-) create mode 100644 docs/core/metrics_datadog.md create mode 100644 examples/metrics_datadog/sam/template.yaml create mode 100644 examples/metrics_datadog/src/add_metrics_with_provider.py create mode 100644 examples/metrics_datadog/src/add_metrics_without_provider.py diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 2fd4cfc98d1..014a9726cb9 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -1,5 +1,5 @@ --- -title: Metrics +title: CloudWatch EMF description: Core utility --- diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md new file mode 100644 index 00000000000..1f71b70abc1 --- /dev/null +++ b/docs/core/metrics_datadog.md @@ -0,0 +1,398 @@ +--- +title: Datadog +description: Core utility +--- + +Datadog provider creates custom metrics by flushing metrics to standard output and exporting metrics using [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"} or flushing metrics to [Datadog extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} using Datadog SDK. 
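+
+As a minimal, illustrative sketch of the flush-to-log mode described above (the import path and method names follow the provider added in this PR; the metric name and tag value are made up):
+
+```python
+from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
+
+# Flush serialized metrics to standard output, to be shipped by the Datadog Forwarder
+metrics = DatadogMetrics(flush_to_log=True)
+
+
+def lambda_handler(event, context):
+    # extra keyword arguments become Datadog tags, e.g. "product:latte"
+    metrics.add_metric(name="item_sold", value=1, product="latte")
+    metrics.flush_metrics()
+```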
+ +These metrics can be visualized through [Datadog console](https://app.datadoghq.com/metric/explore){target="_blank" rel="nofollow"}. + +## Key features + +* Flush metrics to standard output +* Flush metrics to Datadog extension +* Validate against common metric definition mistakes (values) +* Support for adding default tags to all created metrics + +## Terminologies + +If you're new to Datadog custom metrics, we suggest you read the Datadog [official documentation](https://docs.datadoghq.com/metrics/custom_metrics/){target="_blank" rel="nofollow"} for custom metrics. + +## Getting started + +???+ tip + All examples shared in this documentation are available within the [project repository](https://github.com/aws-powertools/powertools-lambda-python/tree/develop/examples){target="_blank" }. + +Datadog provider has two global settings that will be used across all metrics emitted: + +| Setting | Description | Environment variable | Constructor parameter | +| -------------------- | ------------------------------------------------------------------------------- | ------------------------------ | --------------------- | +| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` | +| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder| `DD_FLUSH_TO_LOG` | `flush_to_log` | + +Experiment to use your application or main service as the metric namespace to easily group all metrics. + +### Install + +???+ note + If you are using the Datadog Forwarder, you can skip this step. + +To adhere to Lambda best practices and effectively minimize the size of your development package, we recommend using the official Datadog layers built specifically for the SDK and extension components. The SAM template below demonstrates how to configure these layers. + +```yaml hl_lines="13" title="AWS Serverless Application Model (SAM) example" +--8<-- "examples/metrics_datadog/sam/template.yaml" +``` + +If you prefer not to utilize the Datadog SDK provided through the Datadog layer, add `aws-lambda-powertools[datadog]` as a dependency in your preferred tool: _e.g._, _requirements.txt_, _pyproject.toml_. This will ensure you have the required dependencies before using the Datadog provider. + +### Creating metrics + +You can create metrics using `add_metric`. + +???+ tip + You can initialize DatadogMetrics in any other module too. It'll keep track of your aggregate metrics in memory to optimize costs (one blob instead of multiples). + +=== "add_metrics_with_provider.py" + + ```python hl_lines="10" + --8<-- "examples/metrics_datadog/src/add_metrics_with_provider.py" + ``` + +=== "add_metrics_without_provider.py" + + ```python hl_lines="13" + --8<-- "examples/metrics_datadog/src/add_dimension_without_provider.py" + ``` + +???+ warning "Warning: Do not create metrics outside the handler" + Metrics added in the global scope will only be added during cold start. Disregard if that's the intended behavior. + +### Adding high-resolution metrics + +You can create [high-resolution metrics](https://aws.amazon.com/about-aws/whats-new/2023/02/amazon-cloudwatch-high-resolution-metric-extraction-structured-logs/){target="_blank"} passing `resolution` parameter to `add_metric`. + +???+ tip "When is it useful?"
+ High-resolution metrics are data with a granularity of one second and are very useful in several situations such as telemetry, time series, real-time incident management, and others. + +=== "add_high_resolution_metrics.py" + + ```python hl_lines="10" + --8<-- "examples/metrics/src/add_high_resolution_metric.py" + ``` + +???+ tip "Tip: Autocomplete Metric Resolutions" + `MetricResolution` enum facilitates finding a supported metric resolution by CloudWatch. Alternatively, you can pass the values 1 or 60 (must be one of them) as an integer _e.g. `resolution=1`_. + +### Adding multi-value metrics + +You can call `add_metric()` with the same metric name multiple times. The values will be grouped together in a list. + +=== "add_multi_value_metrics.py" + + ```python hl_lines="14-15" + --8<-- "examples/metrics/src/add_multi_value_metrics.py" + ``` + +=== "add_multi_value_metrics_output.json" + + ```python hl_lines="15 24-26" + --8<-- "examples/metrics/src/add_multi_value_metrics_output.json" + ``` + +### Adding default dimensions + +You can use `set_default_dimensions` method, or `default_dimensions` parameter in `log_metrics` decorator, to persist dimensions across Lambda invocations. + +If you'd like to remove them at some point, you can use `clear_default_dimensions` method. + +=== "set_default_dimensions.py" + + ```python hl_lines="9" + --8<-- "examples/metrics/src/set_default_dimensions.py" + ``` + +=== "set_default_dimensions_log_metrics.py" + + ```python hl_lines="9 13" + --8<-- "examples/metrics/src/set_default_dimensions_log_metrics.py" + ``` + +### Flushing metrics + +As you finish adding all your metrics, you need to serialize and flush them to standard output. You can do that automatically with the `log_metrics` decorator. + +This decorator also **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised. + +=== "add_metrics.py" + + ```python hl_lines="8" + --8<-- "examples/metrics/src/add_metrics.py" + ``` + +=== "log_metrics_output.json" + + ```json hl_lines="6 9 14 21-23" + --8<-- "examples/metrics/src/log_metrics_output.json" + ``` + +???+ tip "Tip: Metric validation" + If metrics are provided, and any of the following criteria are not met, **`SchemaValidationError`** exception will be raised: + + * Maximum of 29 user-defined dimensions + * Namespace is set, and no more than one + * Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html){target="_blank"} + +#### Raising SchemaValidationError on empty metrics + +If you want to ensure at least one metric is always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator: + +```python hl_lines="7" title="Raising SchemaValidationError exception if no metrics are added" +--8<-- "examples/metrics/src/raise_on_empty_metrics.py" +``` + +???+ tip "Suppressing warning messages on empty metrics" + If you expect your function to execute without publishing metrics every time, you can suppress the warning with **`warnings.filterwarnings("ignore", "No metrics to publish*")`**. + +### Capturing cold start metric + +You can optionally capture cold start metrics with `log_metrics` decorator via `capture_cold_start_metric` param. 
+ +=== "capture_cold_start_metric.py" + + ```python hl_lines="7" + --8<-- "examples/metrics/src/capture_cold_start_metric.py" + ``` + +=== "capture_cold_start_metric_output.json" + + ```json hl_lines="9 15 22 24-25" + --8<-- "examples/metrics/src/capture_cold_start_metric_output.json" + ``` + +If it's a cold start invocation, this feature will: + +* Create a separate EMF blob solely containing a metric named `ColdStart` +* Add `function_name` and `service` dimensions + +This has the advantage of keeping cold start metric separate from your application metrics, where you might have unrelated dimensions. + +???+ info + We do not emit 0 as a value for ColdStart metric for cost reasons. [Let us know](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2C+triage&template=feature_request.md&title=){target="_blank"} if you'd prefer a flag to override it. + +### Environment variables + +The following environment variable is available to configure Metrics at a global scope: + +| Setting | Description | Environment variable | Default | +|--------------------|------------------------------------------------------------------------------|-----------------------------------------|---------| +| **Namespace Name** | Sets namespace used for metrics. | `POWERTOOLS_METRICS_NAMESPACE` | `None` | + +`POWERTOOLS_METRICS_NAMESPACE` is also available on a per-instance basis with the `namespace` parameter, which will consequently override the environment variable value. + +## Advanced + +### Adding metadata + +You can add high-cardinality data as part of your Metrics log with `add_metadata` method. This is useful when you want to search highly contextual information along with your metrics in your logs. + +???+ info + **This will not be available during metrics visualization** - Use **dimensions** for this purpose + +=== "add_metadata.py" + + ```python hl_lines="14" + --8<-- "examples/metrics/src/add_metadata.py" + ``` + +=== "add_metadata_output.json" + + ```json hl_lines="22" + --8<-- "examples/metrics/src/add_metadata_output.json" + ``` + +### Single metric with a different dimension + +CloudWatch EMF uses the same dimensions across all your metrics. Use `single_metric` if you have a metric that should have different dimensions. + +???+ info + Generally, this would be an edge case since you [pay for unique metric](https://aws.amazon.com/cloudwatch/pricing){target="_blank"}. Keep the following formula in mind: + + **unique metric = (metric_name + dimension_name + dimension_value)** + +=== "single_metric.py" + + ```python hl_lines="11" + --8<-- "examples/metrics/src/single_metric.py" + ``` + +=== "single_metric_output.json" + + ```json hl_lines="15" + --8<-- "examples/metrics/src/single_metric_output.json" + ``` + +By default it will skip all previously defined dimensions including default dimensions. Use `default_dimensions` keyword argument if you want to reuse default dimensions or specify custom dimensions from a dictionary. + +=== "single_metric_default_dimensions_inherit.py" + + ```python hl_lines="10 15" + --8<-- "examples/metrics/src/single_metric_default_dimensions_inherit.py" + ``` + +=== "single_metric_default_dimensions.py" + + ```python hl_lines="12" + --8<-- "examples/metrics/src/single_metric_default_dimensions.py" + ``` + +### Flushing metrics manually + +If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. 
This method will serialize, print metrics available to standard output, and clear in-memory metrics data. + +???+ warning + This does not capture Cold Start metrics, and metric data validation still applies. + +Contrary to the `log_metrics` decorator, you are now also responsible to flush metrics in the event of an exception. + +```python hl_lines="18" title="Manually flushing and clearing metrics from memory" +--8<-- "examples/metrics/src/flush_metrics.py" +``` + +### Metrics isolation + +You can use `EphemeralMetrics` class when looking to isolate multiple instances of metrics with distinct namespaces and/or dimensions. + +!!! note "This is a typical use case is for multi-tenant, or emitting same metrics for distinct applications." + +```python hl_lines="1 4" title="EphemeralMetrics usage" +--8<-- "examples/metrics/src/ephemeral_metrics.py" +``` + +**Differences between `EphemeralMetrics` and `Metrics`** + +`EphemeralMetrics` has only one difference while keeping nearly the exact same set of features: + +| Feature | Metrics | EphemeralMetrics | +| ----------------------------------------------------------------------------------------------------------- | ------- | ---------------- | +| **Share data across instances** (metrics, dimensions, metadata, etc.) | Yes | - | + +!!! question "Why not changing the default `Metrics` behaviour to not share data across instances?" + +This is an intentional design to prevent accidental data deduplication or data loss issues due to [CloudWatch EMF](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html){target="_blank"} metric dimension constraint. + + +In CloudWatch, there are two metric ingestion mechanisms: [EMF (async)](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html){target="_blank"} and [`PutMetricData` API (sync)](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#CloudWatch.Client.put_metric_data){target="_blank"}. + +The former creates metrics asynchronously via CloudWatch Logs, and the latter uses a synchronous and more flexible ingestion API. + +!!! important "Key concept" + CloudWatch [considers a metric unique](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Metric){target="_blank"} by a combination of metric **name**, metric **namespace**, and zero or more metric **dimensions**. + +With EMF, metric dimensions are shared with any metrics you define. With `PutMetricData` API, you can set a [list](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html){target="_blank"} defining one or more metrics with distinct dimensions. + +This is a subtle yet important distinction. Imagine you had the following metrics to emit: + +| Metric Name | Dimension | Intent | +| ---------------------- | ----------------------------------------- | ------------------ | +| **SuccessfulBooking** | service="booking", **tenant_id**="sample" | Application metric | +| **IntegrationLatency** | service="booking", function_name="sample" | Operational metric | +| **ColdStart** | service="booking", function_name="sample" | Operational metric | + +The `tenant_id` dimension could vary leading to two common issues: + +1. `ColdStart` metric will be created multiple times (N * number of unique tenant_id dimension value), despite the `function_name` being the same +2. 
`IntegrationLatency` metric will also be created multiple times due to `tenant_id` as well as `function_name` (may or may not be intentional) + +These issues are exacerbated when you create **(A)** metric dimensions conditionally, **(B)** multiple metrics' instances throughout your code instead of reusing them (globals). Subsequent metrics' instances will have (or lack) different metric dimensions resulting in different metrics and data points with the same name. + +!!! note "Intentional design to address these scenarios" + +**On 1**, when you enable [capture_start_metric feature](#capturing-cold-start-metric), we transparently create and flush an additional EMF JSON Blob that is independent from your application metrics. This prevents data pollution. + +**On 2**, you can use `EphemeralMetrics` to create an additional EMF JSON Blob from your application metric (`SuccessfulBooking`). This ensures that `IntegrationLatency` operational metric data points aren't tied to any dynamic dimension values like `tenant_id`. + +That is why `Metrics` shares data across instances by default, as that covers 80% of use cases and different personas using Powertools. This allows them to instantiate `Metrics` in multiple places throughout their code - be a separate file, a middleware, or an abstraction that sets default dimensions. + +### Observability providers + +!!! note "In this context, an observability provider is an AWS Lambda Partner offering a platform for logging, metrics, traces, etc." + +You can choose an Observability provider other than CloudWatch EMF Metrics. Powertools offers seamless metric streaming via Lambda Extensions, SDKs, or other methods. Keep in mind, some providers may not support CloudWatch EMF's default Metrics format. + +#### Datadog provider + +Add `aws-lambda-powertools[datadog]` as a dependency in your preferred tool: _e.g._, _requirements.txt_, _pyproject.toml_. This will ensure you have the required dependencies before using the Datadog provider. + +To use Datadog as an external observability provider you will need to configure your Lambda to add an `API_KEY` and the `Datadog Endpoint`. [Check here](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} the Datadog documentation on how to do this. + +You can import the metric provider package, initialize the provider, and use it as the default metrics class: + +```python title="Using built-in Datadog Metrics Provider" +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider + +dd_provider = DatadogProvider(namespace="default") +metrics = DatadogMetrics(provider=dd_provider) + +@metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) +def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, category="online") +``` + +## Testing your code + +### Setting environment variables + +???+ tip + Ignore this section, if: + + * You are explicitly setting namespace/default dimension via `namespace` and `service` parameters + * You're not instantiating `Metrics` in the global namespace + + For example, `Metrics(namespace="ServerlessAirline", service="booking")` + +Make sure to set `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` before running your tests to prevent failing on `SchemaValidation` exception. You can set it before you run tests or via pytest plugins like [dotenv](https://pypi.org/project/pytest-dotenv/){target="_blank" rel="nofollow"}.
+ +```bash title="Injecting dummy Metric Namespace before running tests" +--8<-- "examples/metrics/src/run_tests_env_var.sh" +``` + +### Clearing metrics + +`Metrics` keep metrics in memory across multiple instances. If you need to test this behavior, you can use the following Pytest fixture to ensure metrics are reset incl. cold start: + +```python title="Clearing metrics between tests" +--8<-- "examples/metrics/src/clear_metrics_in_tests.py" +``` + +### Functional testing + +You can read standard output and assert whether metrics have been flushed. Here's an example using `pytest` with `capsys` built-in fixture: + +=== "assert_single_emf_blob.py" + + ```python hl_lines="6 9-10 23-34" + --8<-- "examples/metrics/src/assert_single_emf_blob.py" + ``` + +=== "add_metrics.py" + + ```python + --8<-- "examples/metrics/src/add_metrics.py" + ``` + +=== "assert_multiple_emf_blobs.py" + + This will be needed when using `capture_cold_start_metric=True`, or when both `Metrics` and `single_metric` are used. + + ```python hl_lines="20-21 27" + --8<-- "examples/metrics/src/assert_multiple_emf_blobs.py" + ``` + +=== "assert_multiple_emf_blobs_module.py" + + ```python + --8<-- "examples/metrics/src/assert_multiple_emf_blobs_module.py" + ``` + +???+ tip + For more elaborate assertions and comparisons, check out [our functional testing for Metrics utility.](https://github.com/aws-powertools/powertools-lambda-python/blob/develop/tests/functional/test_metrics.py){target="_blank"} diff --git a/examples/metrics_datadog/sam/template.yaml b/examples/metrics_datadog/sam/template.yaml new file mode 100644 index 00000000000..ebfc3ed2e36 --- /dev/null +++ b/examples/metrics_datadog/sam/template.yaml @@ -0,0 +1,31 @@ +AWSTemplateFormatVersion: "2010-09-09" +Transform: AWS::Serverless-2016-10-31 +Description: Powertools for AWS Lambda (Python) version + +Globals: + Function: + Timeout: 5 + Runtime: python3.10 + Tracing: Active + Environment: + Variables: + POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline + DD_API_KEY: "" + DD_SITE: datadoghq.com + + Layers: + # Find the latest Layer version in the official documentation + # https://docs.powertools.aws.dev/lambda/python/latest/#lambda-layer + - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPythonV2:40 + # Find the latest Layer version in the Datadog official documentation + # See https://github.com/DataDog/datadog-lambda-python/releases + - !Sub arn:aws:lambda:${AWS::Region}:464622532012:layer:Datadog-Python310:78 + # See https://github.com/DataDog/datadog-lambda-extension/releases + - !Sub arn:aws:lambda:${AWS::Region}:464622532012:layer:Datadog-Extension:45 + +Resources: + CaptureLambdaHandlerExample: + Type: AWS::Serverless::Function + Properties: + CodeUri: ../src + Handler: capture_lambda_handler.handler diff --git a/examples/metrics_datadog/src/add_metrics_with_provider.py b/examples/metrics_datadog/src/add_metrics_with_provider.py new file mode 100644 index 00000000000..ee43da75f4e --- /dev/null +++ b/examples/metrics_datadog/src/add_metrics_with_provider.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider +from aws_lambda_powertools.utilities.typing import LambdaContext + +provider = DatadogProvider() +metrics = DatadogMetrics(provider=provider) + + +@metrics.log_metrics +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", value=1) diff --git a/examples/metrics_datadog/src/add_metrics_without_provider.py 
b/examples/metrics_datadog/src/add_metrics_without_provider.py new file mode 100644 index 00000000000..714654c9b89 --- /dev/null +++ b/examples/metrics_datadog/src/add_metrics_without_provider.py @@ -0,0 +1,9 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() + + +@metrics.log_metrics +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", value=1) diff --git a/mkdocs.yml b/mkdocs.yml index 49bf5a347e5..8c4667edd58 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -15,7 +15,9 @@ nav: - Features: - core/tracer.md - core/logger.md - - core/metrics.md + - Metrics: + - core/metrics.md + - core/metrics_datadog.md - Event Handler: - core/event_handler/api_gateway.md - core/event_handler/appsync.md From 84f38d51c28df80c3f0668c386bc0f6d1a146fe5 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 11 Aug 2023 00:05:45 +0100 Subject: [PATCH 09/37] Documentation: adding examples --- docs/core/metrics.md | 2 +- docs/core/metrics_datadog.md | 248 +++--------------- .../metrics/src/clear_metrics_in_tests.py | 4 +- .../src/add_metrics_with_provider.py | 6 +- .../src/add_metrics_with_tags.py | 9 + .../src/add_metrics_without_provider.py | 6 +- .../src/assert_single_datadog_metric.py | 9 + .../src/capture_cold_start_metric.py | 9 + .../src/capture_cold_start_metric_output.json | 8 + .../src/clear_metrics_in_tests.py | 13 + examples/metrics_datadog/src/flush_metrics.py | 17 ++ .../src/flush_metrics_to_standard_output.py | 10 + .../src/log_metrics_output.json | 9 + .../src/raise_on_empty_metrics.py | 10 + .../metrics_datadog/src/run_tests_env_var.sh | 1 + .../metrics_datadog/src/set_default_tags.py | 10 + .../src/set_default_tags_log_metrics.py | 11 + poetry.lock | 64 ++--- 18 files changed, 201 insertions(+), 245 deletions(-) create mode 100644 examples/metrics_datadog/src/add_metrics_with_tags.py create mode 100644 examples/metrics_datadog/src/assert_single_datadog_metric.py create mode 100644 examples/metrics_datadog/src/capture_cold_start_metric.py create mode 100644 examples/metrics_datadog/src/capture_cold_start_metric_output.json create mode 100644 examples/metrics_datadog/src/clear_metrics_in_tests.py create mode 100644 examples/metrics_datadog/src/flush_metrics.py create mode 100644 examples/metrics_datadog/src/flush_metrics_to_standard_output.py create mode 100644 examples/metrics_datadog/src/log_metrics_output.json create mode 100644 examples/metrics_datadog/src/raise_on_empty_metrics.py create mode 100644 examples/metrics_datadog/src/run_tests_env_var.sh create mode 100644 examples/metrics_datadog/src/set_default_tags.py create mode 100644 examples/metrics_datadog/src/set_default_tags_log_metrics.py diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 014a9726cb9..e3a0676ff67 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -384,4 +384,4 @@ You can read standard output and assert whether metrics have been flushed. 
Here' ``` ???+ tip - For more elaborate assertions and comparisons, check out [our functional testing for Metrics utility.](https://github.com/aws-powertools/powertools-lambda-python/blob/develop/tests/functional/test_metrics.py){target="_blank"} + For more elaborate assertions and comparisons, check out [our functional testing for Metrics utility.](https://github.com/aws-powertools/powertools-lambda-python/blob/develop/tests/functional/metrics/test_metrics_cloudwatch_emf.py){target="_blank"} diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md index 1f71b70abc1..351d1228e45 100644 --- a/docs/core/metrics_datadog.md +++ b/docs/core/metrics_datadog.md @@ -47,7 +47,7 @@ If you prefer not to utilize the Datadog SDK provided through the Datadog layer, ### Creating metrics -You can create metrics using `add_metric`. +You can create metrics using `add_metric`. Optional parameters such as `timestamp` can be included; if not provided, the Datadog provider will automatically use the current timestamp by default. ???+ tip You can initialize DatadogMetrics in any other module too. It'll keep track of your aggregate metrics in memory to optimize costs (one blob instead of multiples). @@ -61,60 +61,51 @@ You can create metrics using `add_metric`. === "add_metrics_without_provider.py" ```python hl_lines="13" - --8<-- "examples/metrics_datadog/src/add_dimension_without_provider.py" + --8<-- "examples/metrics_datadog/src/add_metrics_without_provider.py" ``` ???+ warning "Warning: Do not create metrics outside the handler" Metrics added in the global scope will only be added during cold start. Disregard if that's the intended behavior. -### Adding high-resolution metrics +### Adding tags -You can create [high-resolution metrics](https://aws.amazon.com/about-aws/whats-new/2023/02/amazon-cloudwatch-high-resolution-metric-extraction-structured-logs/){target="_blank"} passing `resolution` parameter to `add_metric`. +Datadog offers the flexibility to configure tags per metric. To provide a better experience for our customers, you can pass an arbitrary number of keyword arguments (kwargs) that can be used as tags. -???+ tip "When is it useful?" - High-resolution metrics are data with a granularity of one second and are very useful in several situations such as telemetry, time series, real-time incident management, and others. - -=== "add_high_resolution_metrics.py" +=== "add_metrics_with_tags.py" ```python hl_lines="10" - --8<-- "examples/metrics/src/add_high_resolution_metric.py" + --8<-- "examples/metrics_datadog/src/add_metrics_with_tags.py" ``` -???+ tip "Tip: Autocomplete Metric Resolutions" - `MetricResolution` enum facilitates finding a supported metric resolution by CloudWatch. Alternatively, you can pass the values 1 or 60 (must be one of them) as an integer _e.g. `resolution=1`_. - -### Adding multi-value metrics +### Adding default tags -You can call `add_metric()` with the same metric name multiple times. The values will be grouped together in a list. +If you want to set the same tags for all metrics, you can use the `set_default_tags` method or the `default_tags` parameter in the `log_metrics` decorator and then persist tags across the Lambda invocations.
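+
+As a short inline sketch of both approaches (the tag values are hypothetical; the tabbed examples below show the complete files):
+
+```python
+from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
+
+metrics = DatadogMetrics(flush_to_log=True)
+metrics.set_default_tags(environment="test", log_group="/lambda/test")
+
+
+# equivalent: @metrics.log_metrics(default_tags={"environment": "test", "log_group": "/lambda/test"})
+@metrics.log_metrics
+def lambda_handler(event, context):
+    metrics.add_metric(name="SuccessfulBooking", value=1)
+```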
- ```python hl_lines="14-15" - --8<-- "examples/metrics/src/add_multi_value_metrics.py" - ``` +???+ note + When default tags are configured and an additional specific tag is assigned to a metric, the metric will exclusively contain that specific tag. -=== "add_multi_value_metrics_output.json" +=== "set_default_tags.py" - ```python hl_lines="15 24-26" - --8<-- "examples/metrics/src/add_multi_value_metrics_output.json" + ```python hl_lines="9" + --8<-- "examples/metrics_datadog/src/set_default_tags.py" ``` -### Adding default dimensions - -You can use `set_default_dimensions` method, or `default_dimensions` parameter in `log_metrics` decorator, to persist dimensions across Lambda invocations. +=== "set_default_tags_log_metrics.py" -If you'd like to remove them at some point, you can use `clear_default_dimensions` method. + ```python hl_lines="9 13" + --8<-- "examples/metrics_datadog/src/set_default_tags_log_metrics.py" + ``` -=== "set_default_dimensions.py" +### Flushing metrics to standard output - ```python hl_lines="9" - --8<-- "examples/metrics/src/set_default_dimensions.py" - ``` +You have the option to flush metrics to the standard output for exporting, which can then be seamlessly processed through the [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. -=== "set_default_dimensions_log_metrics.py" +=== "flush_metrics_to_standard_output.py" - ```python hl_lines="9 13" - --8<-- "examples/metrics/src/set_default_dimensions_log_metrics.py" + ```python hl_lines="10" + --8<-- "examples/metrics_datadog/src/flush_metrics_to_standard_output.py" ``` ### Flushing metrics @@ -126,28 +117,21 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr === "add_metrics.py" ```python hl_lines="8" - --8<-- "examples/metrics/src/add_metrics.py" + --8<-- "examples/metrics_datadog/src/add_metrics_with_tags.py" ``` === "log_metrics_output.json" ```json hl_lines="6 9 14 21-23" - --8<-- "examples/metrics/src/log_metrics_output.json" + --8<-- "examples/metrics_datadog/src/log_metrics_output.json" ``` -???+ tip "Tip: Metric validation" - If metrics are provided, and any of the following criteria are not met, **`SchemaValidationError`** exception will be raised: - - * Maximum of 29 user-defined dimensions - * Namespace is set, and no more than one - * Metric units must be [supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html){target="_blank"} - #### Raising SchemaValidationError on empty metrics If you want to ensure at least one metric is always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator: ```python hl_lines="7" title="Raising SchemaValidationError exception if no metrics are added" ---8<-- "examples/metrics/src/raise_on_empty_metrics.py" +--8<-- "examples/metrics_datadog/src/raise_on_empty_metrics.py" ``` ???+ tip "Suppressing warning messages on empty metrics" @@ -160,19 +144,19 @@ You can optionally capture cold start metrics with `log_metrics` decorator via ` === "capture_cold_start_metric.py" ```python hl_lines="7" - --8<-- "examples/metrics/src/capture_cold_start_metric.py" + --8<-- "examples/metrics_datadog/src/capture_cold_start_metric.py" ``` === "capture_cold_start_metric_output.json" ```json hl_lines="9 15 22 24-25" - --8<-- "examples/metrics/src/capture_cold_start_metric_output.json" + --8<-- "examples/metrics_datadog/src/capture_cold_start_metric_output.json" ``` If it's a cold start invocation, this feature 
will:

-* Create a separate EMF blob solely containing a metric named `ColdStart`
-* Add `function_name` and `service` dimensions
+* Create a separate Datadog metric named `ColdStart`
+* Add `function_name` as a tag

 This has the advantage of keeping the cold start metric separate from your application metrics, where you might have unrelated tags.

@@ -191,60 +175,6 @@ The following environment variable is available to configure Metrics at a global

 ## Advanced

-### Adding metadata
-
-You can add high-cardinality data as part of your Metrics log with `add_metadata` method. This is useful when you want to search highly contextual information along with your metrics in your logs.
-
-???+ info
-    **This will not be available during metrics visualization** - Use **dimensions** for this purpose
-
-=== "add_metadata.py"
-
-    ```python hl_lines="14"
-    --8<-- "examples/metrics/src/add_metadata.py"
-    ```
-
-=== "add_metadata_output.json"
-
-    ```json hl_lines="22"
-    --8<-- "examples/metrics/src/add_metadata_output.json"
-    ```
-
-### Single metric with a different dimension
-
-CloudWatch EMF uses the same dimensions across all your metrics. Use `single_metric` if you have a metric that should have different dimensions.
-
-???+ info
-    Generally, this would be an edge case since you [pay for unique metric](https://aws.amazon.com/cloudwatch/pricing){target="_blank"}. Keep the following formula in mind:
-
-    **unique metric = (metric_name + dimension_name + dimension_value)**
-
-=== "single_metric.py"
-
-    ```python hl_lines="11"
-    --8<-- "examples/metrics/src/single_metric.py"
-    ```
-
-=== "single_metric_output.json"
-
-    ```json hl_lines="15"
-    --8<-- "examples/metrics/src/single_metric_output.json"
-    ```
-
-By default it will skip all previously defined dimensions including default dimensions. Use `default_dimensions` keyword argument if you want to reuse default dimensions or specify custom dimensions from a dictionary.
-
-=== "single_metric_default_dimensions_inherit.py"
-
-    ```python hl_lines="10 15"
-    --8<-- "examples/metrics/src/single_metric_default_dimensions_inherit.py"
-    ```
-
-=== "single_metric_default_dimensions.py"
-
-    ```python hl_lines="12"
-    --8<-- "examples/metrics/src/single_metric_default_dimensions.py"
-    ```
-
 ### Flushing metrics manually

 If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize, print metrics available to standard output, and clear in-memory metrics data.
@@ -255,87 +185,7 @@ If you are using the AWS Lambda Web Adapter project, or a middleware with custom

     Contrary to the `log_metrics` decorator, you are now also responsible to flush metrics in the event of an exception.

 ```python hl_lines="18" title="Manually flushing and clearing metrics from memory"
---8<-- "examples/metrics/src/flush_metrics.py"
-```
-
-### Metrics isolation
-
-You can use `EphemeralMetrics` class when looking to isolate multiple instances of metrics with distinct namespaces and/or dimensions.
-
-!!! note "This is a typical use case is for multi-tenant, or emitting same metrics for distinct applications."
- -```python hl_lines="1 4" title="EphemeralMetrics usage" ---8<-- "examples/metrics/src/ephemeral_metrics.py" -``` - -**Differences between `EphemeralMetrics` and `Metrics`** - -`EphemeralMetrics` has only one difference while keeping nearly the exact same set of features: - -| Feature | Metrics | EphemeralMetrics | -| ----------------------------------------------------------------------------------------------------------- | ------- | ---------------- | -| **Share data across instances** (metrics, dimensions, metadata, etc.) | Yes | - | - -!!! question "Why not changing the default `Metrics` behaviour to not share data across instances?" - -This is an intentional design to prevent accidental data deduplication or data loss issues due to [CloudWatch EMF](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html){target="_blank"} metric dimension constraint. - - -In CloudWatch, there are two metric ingestion mechanisms: [EMF (async)](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html){target="_blank"} and [`PutMetricData` API (sync)](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#CloudWatch.Client.put_metric_data){target="_blank"}. - -The former creates metrics asynchronously via CloudWatch Logs, and the latter uses a synchronous and more flexible ingestion API. - -!!! important "Key concept" - CloudWatch [considers a metric unique](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Metric){target="_blank"} by a combination of metric **name**, metric **namespace**, and zero or more metric **dimensions**. - -With EMF, metric dimensions are shared with any metrics you define. With `PutMetricData` API, you can set a [list](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html){target="_blank"} defining one or more metrics with distinct dimensions. - -This is a subtle yet important distinction. Imagine you had the following metrics to emit: - -| Metric Name | Dimension | Intent | -| ---------------------- | ----------------------------------------- | ------------------ | -| **SuccessfulBooking** | service="booking", **tenant_id**="sample" | Application metric | -| **IntegrationLatency** | service="booking", function_name="sample" | Operational metric | -| **ColdStart** | service="booking", function_name="sample" | Operational metric | - -The `tenant_id` dimension could vary leading to two common issues: - -1. `ColdStart` metric will be created multiple times (N * number of unique tenant_id dimension value), despite the `function_name` being the same -2. `IntegrationLatency` metric will be also created multiple times due to `tenant_id` as well as `function_name` (may or not be intentional) - -These issues are exacerbated when you create **(A)** metric dimensions conditionally, **(B)** multiple metrics' instances throughout your code instead of reusing them (globals). Subsequent metrics' instances will have (or lack) different metric dimensions resulting in different metrics and data points with the same name. - -!!! note "Intentional design to address these scenarios" - -**On 1**, when you enable [capture_start_metric feature](#capturing-cold-start-metric), we transparently create and flush an additional EMF JSON Blob that is independent from your application metrics. This prevents data pollution. 
- -**On 2**, you can use `EphemeralMetrics` to create an additional EMF JSON Blob from your application metric (`SuccessfulBooking`). This ensures that `IntegrationLatency` operational metric data points aren't tied to any dynamic dimension values like `tenant_id`. - -That is why `Metrics` shares data across instances by default, as that covers 80% of use cases and different personas using Powertools. This allows them to instantiate `Metrics` in multiple places throughout their code - be a separate file, a middleware, or an abstraction that sets default dimensions. - -### Observability providers - -!!! note "In this context, an observability provider is an AWS Lambda Partner offering a platform for logging, metrics, traces, etc." - -You can choose an Observability provider other than CloudWatch EMF Metrics. Powertools offers seamless metric streaming via Lambda Extensions, SDKs, or other methods. Keep in mind, some providers may not support CloudWatch EMF's default Metrics format. - -#### Datadog provider - -Add `aws-lambda-powertools[datadog]` as a dependency in your preferred tool: _e.g._, _requirements.txt_, _pyproject.toml_. This will ensure you have the required dependencies before using Tracer. - -To use Datadog as an external observability provider you will need to configure your Lambda to add an `API_KEY` and the `Datadog Endpoint`. [Check here](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} the Datadog documentation on how to do this. - -You can import from metric provider package, init the provider and use them as default metrics class - -```python title="Using built-in Datadog Metrics Provider" -from aws_lambda_powertools.metrics.provider.datadog_provider_draft import DataDogProvider,DataDogMetrics - -dd_provider = DataDogProvider(namespace="default") -metrics = DataDogMetrics(provider=dd_provider) - -@metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) -def lambda_handler(event, context) - metrics.add_metric(name="item_sold",value=1,tags=["category:online"]) +--8<-- "examples/metrics_datadog/src/flush_metrics.py" ``` ## Testing your code @@ -345,54 +195,40 @@ def lambda_handler(event, context) ???+ tip Ignore this section, if: - * You are explicitly setting namespace/default dimension via `namespace` and `service` parameters - * You're not instantiating `Metrics` in the global namespace + * You are explicitly setting namespace via `namespace` parameter + * You're not instantiating `DatadogMetrics` in the global namespace - For example, `Metrics(namespace="ServerlessAirline", service="booking")` + For example, `DatadogMetrics(namespace="ServerlessAirline")` -Make sure to set `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` before running your tests to prevent failing on `SchemaValidation` exception. You can set it before you run tests or via pytest plugins like [dotenv](https://pypi.org/project/pytest-dotenv/){target="_blank" rel="nofollow"}. +Make sure to set `POWERTOOLS_METRICS_NAMESPACE` before running your tests to prevent failing on `SchemaValidation` exception. You can set it before you run tests or via pytest plugins like [dotenv](https://pypi.org/project/pytest-dotenv/){target="_blank" rel="nofollow"}. 
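+
+???+ tip "Sketch: setting the namespace from within the test suite"
+    As an alternative to exporting the variable in your shell, a hypothetical `conftest.py` could set it before your handler module is imported; the provider reads it when it is initialized:
+
+    ```python
+    import os
+
+    # "ServerlessAirline" is an arbitrary, non-empty placeholder namespace
+    os.environ.setdefault("POWERTOOLS_METRICS_NAMESPACE", "ServerlessAirline")
+    ```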
```bash title="Injecting dummy Metric Namespace before running tests" ---8<-- "examples/metrics/src/run_tests_env_var.sh" +--8<-- "examples/metrics_datadog/src/run_tests_env_var.sh" ``` ### Clearing metrics -`Metrics` keep metrics in memory across multiple instances. If you need to test this behavior, you can use the following Pytest fixture to ensure metrics are reset incl. cold start: +`DatadogMetrics` keep metrics in memory across multiple instances. If you need to test this behavior, you can use the following Pytest fixture to ensure metrics are reset incl. cold start: ```python title="Clearing metrics between tests" ---8<-- "examples/metrics/src/clear_metrics_in_tests.py" +--8<-- "examples/metrics_datadog/src/clear_metrics_in_tests.py" ``` ### Functional testing You can read standard output and assert whether metrics have been flushed. Here's an example using `pytest` with `capsys` built-in fixture: -=== "assert_single_emf_blob.py" +=== "assert_single_datadog_metric.py" ```python hl_lines="6 9-10 23-34" - --8<-- "examples/metrics/src/assert_single_emf_blob.py" + --8<-- "examples/metrics_datadog/src/assert_single_datadog_metric.py" ``` === "add_metrics.py" ```python - --8<-- "examples/metrics/src/add_metrics.py" - ``` - -=== "assert_multiple_emf_blobs.py" - - This will be needed when using `capture_cold_start_metric=True`, or when both `Metrics` and `single_metric` are used. - - ```python hl_lines="20-21 27" - --8<-- "examples/metrics/src/assert_multiple_emf_blobs.py" - ``` - -=== "assert_multiple_emf_blobs_module.py" - - ```python - --8<-- "examples/metrics/src/assert_multiple_emf_blobs_module.py" + --8<-- "examples/metrics_datadog/src/add_metrics_without_provider.py" ``` ???+ tip - For more elaborate assertions and comparisons, check out [our functional testing for Metrics utility.](https://github.com/aws-powertools/powertools-lambda-python/blob/develop/tests/functional/test_metrics.py){target="_blank"} + For more elaborate assertions and comparisons, check out [our functional testing for DatadogMetrics utility.](https://github.com/aws-powertools/powertools-lambda-python/blob/develop/tests/functional/metrics/test_metrics_datadog.py){target="_blank"} diff --git a/examples/metrics/src/clear_metrics_in_tests.py b/examples/metrics/src/clear_metrics_in_tests.py index cea3879af83..a5462d3d9e1 100644 --- a/examples/metrics/src/clear_metrics_in_tests.py +++ b/examples/metrics/src/clear_metrics_in_tests.py @@ -1,7 +1,7 @@ import pytest from aws_lambda_powertools import Metrics -from aws_lambda_powertools.metrics import metrics as metrics_global +from aws_lambda_powertools.metrics.provider import cold_start @pytest.fixture(scope="function", autouse=True) @@ -9,6 +9,6 @@ def reset_metric_set(): # Clear out every metric data prior to every test metrics = Metrics() metrics.clear_metrics() - metrics_global.is_cold_start = True # ensure each test has cold start + cold_start.is_cold_start = True # ensure each test has cold start metrics.clear_default_dimensions() # remove persisted default dimensions, if any yield diff --git a/examples/metrics_datadog/src/add_metrics_with_provider.py b/examples/metrics_datadog/src/add_metrics_with_provider.py index ee43da75f4e..9e90b9f41cf 100644 --- a/examples/metrics_datadog/src/add_metrics_with_provider.py +++ b/examples/metrics_datadog/src/add_metrics_with_provider.py @@ -1,3 +1,5 @@ +import time + from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider from aws_lambda_powertools.utilities.typing import LambdaContext @@ -5,6 
+7,6 @@ metrics = DatadogMetrics(provider=provider) -@metrics.log_metrics +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure def lambda_handler(event: dict, context: LambdaContext): - metrics.add_metric(name="SuccessfulBooking", value=1) + metrics.add_metric(name="SuccessfulBooking", value=1, timestamp=int(time.time())) diff --git a/examples/metrics_datadog/src/add_metrics_with_tags.py b/examples/metrics_datadog/src/add_metrics_with_tags.py new file mode 100644 index 00000000000..9ebb0680c13 --- /dev/null +++ b/examples/metrics_datadog/src/add_metrics_with_tags.py @@ -0,0 +1,9 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", value=1, tag1="powertools", tag2="python") diff --git a/examples/metrics_datadog/src/add_metrics_without_provider.py b/examples/metrics_datadog/src/add_metrics_without_provider.py index 714654c9b89..b2bef65e9ab 100644 --- a/examples/metrics_datadog/src/add_metrics_without_provider.py +++ b/examples/metrics_datadog/src/add_metrics_without_provider.py @@ -1,9 +1,11 @@ +import time + from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics from aws_lambda_powertools.utilities.typing import LambdaContext metrics = DatadogMetrics() -@metrics.log_metrics +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure def lambda_handler(event: dict, context: LambdaContext): - metrics.add_metric(name="SuccessfulBooking", value=1) + metrics.add_metric(name="SuccessfulBooking", value=1, timestamp=int(time.time())) diff --git a/examples/metrics_datadog/src/assert_single_datadog_metric.py b/examples/metrics_datadog/src/assert_single_datadog_metric.py new file mode 100644 index 00000000000..6fc757d4a4f --- /dev/null +++ b/examples/metrics_datadog/src/assert_single_datadog_metric.py @@ -0,0 +1,9 @@ +import add_metrics_without_provider + + +def test_log_metrics(capsys): + add_metrics_without_provider.lambda_handler({}, {}) + + log = capsys.readouterr().out.strip() # remove any extra line + + assert "SuccessfulBooking" in log # basic string assertion in JSON str diff --git a/examples/metrics_datadog/src/capture_cold_start_metric.py b/examples/metrics_datadog/src/capture_cold_start_metric.py new file mode 100644 index 00000000000..ec8c2fc1e19 --- /dev/null +++ b/examples/metrics_datadog/src/capture_cold_start_metric.py @@ -0,0 +1,9 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() + + +@metrics.log_metrics(capture_cold_start_metric=True) +def lambda_handler(event: dict, context: LambdaContext): + return diff --git a/examples/metrics_datadog/src/capture_cold_start_metric_output.json b/examples/metrics_datadog/src/capture_cold_start_metric_output.json new file mode 100644 index 00000000000..ee7da985f66 --- /dev/null +++ b/examples/metrics_datadog/src/capture_cold_start_metric_output.json @@ -0,0 +1,8 @@ +{ + "m":"ColdStart", + "v":1, + "e":1691707488, + "t":[ + "function_name:HelloWorldFunction" + ] + } diff --git a/examples/metrics_datadog/src/clear_metrics_in_tests.py b/examples/metrics_datadog/src/clear_metrics_in_tests.py new file mode 100644 index 00000000000..e80552eba83 --- 
/dev/null +++ b/examples/metrics_datadog/src/clear_metrics_in_tests.py @@ -0,0 +1,13 @@ +import pytest + +from aws_lambda_powertools.metrics.provider import cold_start +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics + + +@pytest.fixture(scope="function", autouse=True) +def reset_metric_set(): + # Clear out every metric data prior to every test + metrics = DatadogMetrics() + metrics.clear_metrics() + cold_start.is_cold_start = True # ensure each test has cold start + yield diff --git a/examples/metrics_datadog/src/flush_metrics.py b/examples/metrics_datadog/src/flush_metrics.py new file mode 100644 index 00000000000..89e02fc2f3f --- /dev/null +++ b/examples/metrics_datadog/src/flush_metrics.py @@ -0,0 +1,17 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() + + +def book_flight(flight_id: str, **kwargs): + # logic to book flight + ... + metrics.add_metric(name="SuccessfulBooking", value=1) + + +def lambda_handler(event: dict, context: LambdaContext): + try: + book_flight(flight_id=event.get("flight_id", "")) + finally: + metrics.flush_metrics() diff --git a/examples/metrics_datadog/src/flush_metrics_to_standard_output.py b/examples/metrics_datadog/src/flush_metrics_to_standard_output.py new file mode 100644 index 00000000000..1dce54c300e --- /dev/null +++ b/examples/metrics_datadog/src/flush_metrics_to_standard_output.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider +from aws_lambda_powertools.utilities.typing import LambdaContext + +provider = DatadogProvider(flush_to_log=True) +metrics = DatadogMetrics(provider=provider) + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", value=1) diff --git a/examples/metrics_datadog/src/log_metrics_output.json b/examples/metrics_datadog/src/log_metrics_output.json new file mode 100644 index 00000000000..7e2b6ffe2ab --- /dev/null +++ b/examples/metrics_datadog/src/log_metrics_output.json @@ -0,0 +1,9 @@ +{ + "m":"ServerlessAirline.SuccessfulBooking", + "v":1, + "e":1691707076, + "t":[ + "tag1:powertools", + "tag2:python" + ] +} diff --git a/examples/metrics_datadog/src/raise_on_empty_metrics.py b/examples/metrics_datadog/src/raise_on_empty_metrics.py new file mode 100644 index 00000000000..2242b1dfe06 --- /dev/null +++ b/examples/metrics_datadog/src/raise_on_empty_metrics.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() + + +@metrics.log_metrics(raise_on_empty_metrics=True) # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + # no metrics being created will now raise SchemaValidationError + return diff --git a/examples/metrics_datadog/src/run_tests_env_var.sh b/examples/metrics_datadog/src/run_tests_env_var.sh new file mode 100644 index 00000000000..21a3a090242 --- /dev/null +++ b/examples/metrics_datadog/src/run_tests_env_var.sh @@ -0,0 +1 @@ +POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" DD_FLUSH_TO_LOG="True" python -m pytest diff --git a/examples/metrics_datadog/src/set_default_tags.py b/examples/metrics_datadog/src/set_default_tags.py new file mode 100644 index 00000000000..94d4335b212 --- 
/dev/null +++ b/examples/metrics_datadog/src/set_default_tags.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() +metrics.set_default_tags(tag1="powertools", tag2="python") + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", value=1) diff --git a/examples/metrics_datadog/src/set_default_tags_log_metrics.py b/examples/metrics_datadog/src/set_default_tags_log_metrics.py new file mode 100644 index 00000000000..c276c1d53ff --- /dev/null +++ b/examples/metrics_datadog/src/set_default_tags_log_metrics.py @@ -0,0 +1,11 @@ +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = DatadogMetrics() + +default_tags = {"tag1": "powertools", "tag2": "python"} + + +@metrics.log_metrics(default_tags=default_tags) # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", value=1) diff --git a/poetry.lock b/poetry.lock index 61474ad1f89..641e62e4f43 100644 --- a/poetry.lock +++ b/poetry.lock @@ -93,69 +93,69 @@ typeguard = ">=2.13.3,<2.14.0" [[package]] name = "aws-cdk-aws-apigatewayv2-alpha" -version = "2.90.0a0" +version = "2.91.0a0" description = "The CDK Construct Library for AWS::APIGatewayv2" optional = false python-versions = "~=3.7" files = [ - {file = "aws-cdk.aws-apigatewayv2-alpha-2.90.0a0.tar.gz", hash = "sha256:b81e8321a2a76594fd6d79725eb6136dad844aef9122fe666b7bc5f11bb18de7"}, - {file = "aws_cdk.aws_apigatewayv2_alpha-2.90.0a0-py3-none-any.whl", hash = "sha256:154f146d5a88c602aa477869aea0109c0691437e9a0f6a9d61d98b9b52c83b51"}, + {file = "aws-cdk.aws-apigatewayv2-alpha-2.91.0a0.tar.gz", hash = "sha256:a7b0e78862f3dd81cf13740df2ecda1c877545500872dc476f2dbf3807632a32"}, + {file = "aws_cdk.aws_apigatewayv2_alpha-2.91.0a0-py3-none-any.whl", hash = "sha256:e3d606055c2fe268d80f96052b583060a25fadcdee79d89a75f2eac4354f2e69"}, ] [package.dependencies] -aws-cdk-lib = "2.90.0" +aws-cdk-lib = "2.91.0" constructs = ">=10.0.0,<11.0.0" -jsii = ">=1.86.0,<2.0.0" +jsii = ">=1.85.0,<2.0.0" publication = ">=0.0.3" typeguard = ">=2.13.3,<2.14.0" [[package]] name = "aws-cdk-aws-apigatewayv2-authorizers-alpha" -version = "2.90.0a0" +version = "2.91.0a0" description = "Authorizers for AWS APIGateway V2" optional = false python-versions = "~=3.7" files = [ - {file = "aws-cdk.aws-apigatewayv2-authorizers-alpha-2.90.0a0.tar.gz", hash = "sha256:7aeda815ca63f14362c2d08b06c8e85a4694fc07b8c7ba2a1c20b9346792c74c"}, - {file = "aws_cdk.aws_apigatewayv2_authorizers_alpha-2.90.0a0-py3-none-any.whl", hash = "sha256:ebaeaeddbdcb16b4130ce948f3d8db42254a68372b497700643bd654331207b6"}, + {file = "aws-cdk.aws-apigatewayv2-authorizers-alpha-2.91.0a0.tar.gz", hash = "sha256:cafd747af66f92755f188172f0e892503bc73c26f0d6d95e5f733c67b0307fa8"}, + {file = "aws_cdk.aws_apigatewayv2_authorizers_alpha-2.91.0a0-py3-none-any.whl", hash = "sha256:972393ad1c220708616322946ba3f8936cbe143a69e543762295c1ea02d69849"}, ] [package.dependencies] -"aws-cdk.aws-apigatewayv2-alpha" = "2.90.0.a0" -aws-cdk-lib = "2.90.0" +"aws-cdk.aws-apigatewayv2-alpha" = "2.91.0.a0" +aws-cdk-lib = "2.91.0" constructs = ">=10.0.0,<11.0.0" -jsii = ">=1.86.0,<2.0.0" +jsii = ">=1.85.0,<2.0.0" 
publication = ">=0.0.3" typeguard = ">=2.13.3,<2.14.0" [[package]] name = "aws-cdk-aws-apigatewayv2-integrations-alpha" -version = "2.90.0a0" +version = "2.91.0a0" description = "Integrations for AWS APIGateway V2" optional = false python-versions = "~=3.7" files = [ - {file = "aws-cdk.aws-apigatewayv2-integrations-alpha-2.90.0a0.tar.gz", hash = "sha256:558f12d2e951ae424f828ea8e2bfecc5c4271b4eb0c583feadd7420fdaca7cc7"}, - {file = "aws_cdk.aws_apigatewayv2_integrations_alpha-2.90.0a0-py3-none-any.whl", hash = "sha256:b26ee8a02b06f99110383dc4ad9a4542415cc5c50c35aaccc90c63d80aeeb1fd"}, + {file = "aws-cdk.aws-apigatewayv2-integrations-alpha-2.91.0a0.tar.gz", hash = "sha256:db607df2563f0b839795a41218a59e3ebc29e906dd08aed7b0b59aceba0bde02"}, + {file = "aws_cdk.aws_apigatewayv2_integrations_alpha-2.91.0a0-py3-none-any.whl", hash = "sha256:34d0f103846613a72cfae8419be2e4302863a1e8f6e81951b0a51c2f62ab80b3"}, ] [package.dependencies] -"aws-cdk.aws-apigatewayv2-alpha" = "2.90.0.a0" -aws-cdk-lib = "2.90.0" +"aws-cdk.aws-apigatewayv2-alpha" = "2.91.0.a0" +aws-cdk-lib = "2.91.0" constructs = ">=10.0.0,<11.0.0" -jsii = ">=1.86.0,<2.0.0" +jsii = ">=1.85.0,<2.0.0" publication = ">=0.0.3" typeguard = ">=2.13.3,<2.14.0" [[package]] name = "aws-cdk-lib" -version = "2.90.0" +version = "2.91.0" description = "Version 2 of the AWS Cloud Development Kit library" optional = false python-versions = "~=3.7" files = [ - {file = "aws-cdk-lib-2.90.0.tar.gz", hash = "sha256:d99e304f96f1b04c41922cfa2fc98c1cdd7c88e45c6ebb980ecfc367cdc77e87"}, - {file = "aws_cdk_lib-2.90.0-py3-none-any.whl", hash = "sha256:ef481a40c3ece38aeaf15706ecbfeea19860b8a7b789ea7b28056a9f456c65d1"}, + {file = "aws-cdk-lib-2.91.0.tar.gz", hash = "sha256:1163926527a8b7da931cddea77a4824b929b3f775447c3b7427ecdef7701ce74"}, + {file = "aws_cdk_lib-2.91.0-py3-none-any.whl", hash = "sha256:ec2cadeb5727ea8259ad8a54ac9ff40502032cd2572c81f4594df93365da39da"}, ] [package.dependencies] @@ -163,7 +163,7 @@ files = [ "aws-cdk.asset-kubectl-v20" = ">=2.1.2,<3.0.0" "aws-cdk.asset-node-proxy-agent-v5" = ">=2.0.166,<3.0.0" constructs = ">=10.0.0,<11.0.0" -jsii = ">=1.86.0,<2.0.0" +jsii = ">=1.85.0,<2.0.0" publication = ">=0.0.3" typeguard = ">=2.13.3,<2.14.0" @@ -291,17 +291,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.28.23" +version = "1.28.24" description = "The AWS SDK for Python" optional = false python-versions = ">= 3.7" files = [ - {file = "boto3-1.28.23-py3-none-any.whl", hash = "sha256:807d4a4698ba9a76d5901a1663ff1943d13efbc388908f38b60f209c3511f1d6"}, - {file = "boto3-1.28.23.tar.gz", hash = "sha256:839deb868d1278dd5a3f87208cfc4a8e259c95ca3cbe607cc322d435f02f63b0"}, + {file = "boto3-1.28.24-py3-none-any.whl", hash = "sha256:0300ca6ec8bc136eb316b32cc1e30c66b85bc497f5a5fe42e095ae4280569708"}, + {file = "boto3-1.28.24.tar.gz", hash = "sha256:9d1b4713c888e53a218648ad71522bee9bec9d83f2999fff2494675af810b632"}, ] [package.dependencies] -botocore = ">=1.31.23,<1.32.0" +botocore = ">=1.31.24,<1.32.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -310,13 +310,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.31.23" +version = "1.31.24" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">= 3.7" files = [ - {file = "botocore-1.31.23-py3-none-any.whl", hash = "sha256:d0a95f74eb6bd99e8f52f16af0a430ba6cd1526744f40ffdd3fcccceeaf961c2"}, - {file = "botocore-1.31.23.tar.gz", hash = "sha256:f3258feaebce48f138eb2675168c4d33cc3d99e9f45af13cb8de47bdc2b9c573"}, + {file = "botocore-1.31.24-py3-none-any.whl", hash = "sha256:8c7ba9b09e9104e2d473214e1ffcf84b77e04cf6f5f2344942c1eed9e299f947"}, + {file = "botocore-1.31.24.tar.gz", hash = "sha256:2d8f412c67f9285219f52d5dbbb6ef0dfa9f606da29cbdd41b6d6474bcc4bbd4"}, ] [package.dependencies] @@ -1748,13 +1748,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-secretsmanager" -version = "1.28.16" -description = "Type annotations for boto3.SecretsManager 1.28.16 service generated with mypy-boto3-builder 7.17.1" +version = "1.28.24" +description = "Type annotations for boto3.SecretsManager 1.28.24 service generated with mypy-boto3-builder 7.17.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-secretsmanager-1.28.16.tar.gz", hash = "sha256:07f443b31d2114ac363cfbdbc5f4b97934ca48fb99734bbd06d5c39bce244b83"}, - {file = "mypy_boto3_secretsmanager-1.28.16-py3-none-any.whl", hash = "sha256:05508c3a96d96e482e5aff21b508319a1911e6662aea5be96aa7f7089b8dbfd4"}, + {file = "mypy-boto3-secretsmanager-1.28.24.tar.gz", hash = "sha256:13461d8d2891ec0e430437dbb71c0879ee431ddfedb6b21c265878642faeb2a7"}, + {file = "mypy_boto3_secretsmanager-1.28.24-py3-none-any.whl", hash = "sha256:e224809e28d99c1360bfe6428e8b567bb4a43c38a71263eba0ff4de7fa321142"}, ] [package.dependencies] From ae95c25ca07987d0b47520f97cb6a8780c8f6d4d Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 11 Aug 2023 00:12:28 +0100 Subject: [PATCH 10/37] Documentation: fixing highlights --- docs/core/metrics_datadog.md | 24 +++++++++---------- .../src/log_metrics_output.json | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md index 351d1228e45..5131526d1aa 100644 --- a/docs/core/metrics_datadog.md +++ b/docs/core/metrics_datadog.md @@ -39,7 +39,7 @@ Experiment to use your application or main service as the metric namespace to ea To adhere to Lambda best practices and effectively minimize the size of your development package, we recommend using the official Datadog layers built specifically for the SDK and extension components. Below is the template that demonstrates how to configure a SAM template with this information. -```yaml hl_lines="13" title="AWS Serverless Application Model (SAM) example" +```yaml hl_lines="13 14 22 24" title="AWS Serverless Application Model (SAM) example" --8<-- "examples/metrics_datadog/sam/template.yaml" ``` @@ -54,13 +54,13 @@ You can create metrics using `add_metric`. Optional parameter such as timestamp === "add_metrics_with_provider.py" - ```python hl_lines="10" + ```python hl_lines="6 12" --8<-- "examples/metrics_datadog/src/add_metrics_with_provider.py" ``` === "add_metrics_without_provider.py" - ```python hl_lines="13" + ```python hl_lines="11" --8<-- "examples/metrics_datadog/src/add_metrics_without_provider.py" ``` @@ -73,7 +73,7 @@ Datadog offers the flexibility to configure tags per metric. 
To provider a bette === "add_metrics_with_tags.py" - ```python hl_lines="10" + ```python hl_lines="9" --8<-- "examples/metrics_datadog/src/add_metrics_with_tags.py" ``` @@ -88,13 +88,13 @@ If you'd like to remove them at some point, you can use `clear_default_tags` met === "set_default_tags.py" - ```python hl_lines="9" + ```python hl_lines="5" --8<-- "examples/metrics_datadog/src/set_default_tags.py" ``` === "set_default_tags_log_metrics.py" - ```python hl_lines="9 13" + ```python hl_lines="6 9" --8<-- "examples/metrics_datadog/src/set_default_tags_log_metrics.py" ``` @@ -104,7 +104,7 @@ You have the option to flush metrics to the standard output for exporting, which === "flush_metrics_to_standard_output.py" - ```python hl_lines="10" + ```python hl_lines="4" --8<-- "examples/metrics_datadog/src/flush_metrics_to_standard_output.py" ``` @@ -116,13 +116,13 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr === "add_metrics.py" - ```python hl_lines="8" + ```python hl_lines="7" --8<-- "examples/metrics_datadog/src/add_metrics_with_tags.py" ``` === "log_metrics_output.json" - ```json hl_lines="6 9 14 21-23" + ```json hl_lines="2 6 7" --8<-- "examples/metrics_datadog/src/log_metrics_output.json" ``` @@ -149,7 +149,7 @@ You can optionally capture cold start metrics with `log_metrics` decorator via ` === "capture_cold_start_metric_output.json" - ```json hl_lines="9 15 22 24-25" + ```json hl_lines="2 6" --8<-- "examples/metrics_datadog/src/capture_cold_start_metric_output.json" ``` @@ -184,7 +184,7 @@ If you are using the AWS Lambda Web Adapter project, or a middleware with custom Contrary to the `log_metrics` decorator, you are now also responsible to flush metrics in the event of an exception. -```python hl_lines="18" title="Manually flushing and clearing metrics from memory" +```python hl_lines="17" title="Manually flushing and clearing metrics from memory" --8<-- "examples/metrics_datadog/src/flush_metrics.py" ``` @@ -220,7 +220,7 @@ You can read standard output and assert whether metrics have been flushed. 
Here' === "assert_single_datadog_metric.py" - ```python hl_lines="6 9-10 23-34" + ```python hl_lines="7" --8<-- "examples/metrics_datadog/src/assert_single_datadog_metric.py" ``` diff --git a/examples/metrics_datadog/src/log_metrics_output.json b/examples/metrics_datadog/src/log_metrics_output.json index 7e2b6ffe2ab..782cea9dc4f 100644 --- a/examples/metrics_datadog/src/log_metrics_output.json +++ b/examples/metrics_datadog/src/log_metrics_output.json @@ -1,5 +1,5 @@ { - "m":"ServerlessAirline.SuccessfulBooking", + "m":"SuccessfulBooking", "v":1, "e":1691707076, "t":[ From cf383ac206bb37c62eeedb6a26a218486bd55d6b Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 11 Aug 2023 00:28:26 +0100 Subject: [PATCH 11/37] =?UTF-8?q?Documentation:=20fixing=20mypy=20problems?= =?UTF-8?q?=20=F0=9F=92=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../metrics/provider/datadog/metrics.py | 2 +- docs/core/metrics_datadog.md | 11 ++++++----- ...metric.py => capture_cold_start_datadog_metric.py} | 0 ..._in_tests.py => clear_datadog_metrics_in_tests.py} | 0 .../{flush_metrics.py => flush_datadog_metrics.py} | 0 ...y_metrics.py => raise_on_empty_datadog_metrics.py} | 0 6 files changed, 7 insertions(+), 6 deletions(-) rename examples/metrics_datadog/src/{capture_cold_start_metric.py => capture_cold_start_datadog_metric.py} (100%) rename examples/metrics_datadog/src/{clear_metrics_in_tests.py => clear_datadog_metrics_in_tests.py} (100%) rename examples/metrics_datadog/src/{flush_metrics.py => flush_datadog_metrics.py} (100%) rename examples/metrics_datadog/src/{raise_on_empty_metrics.py => raise_on_empty_datadog_metrics.py} (100%) diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py index 0e8ca1e8aac..91c5d59191c 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py +++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py @@ -93,7 +93,7 @@ def log_metrics( lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, - default_tags: List | None = None, + default_tags: Dict | None = None, ): return self.provider.log_metrics( lambda_handler=lambda_handler, diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md index 5131526d1aa..94e44f99cf9 100644 --- a/docs/core/metrics_datadog.md +++ b/docs/core/metrics_datadog.md @@ -2,8 +2,9 @@ title: Datadog description: Core utility --- - + Datadog provider creates custom metrics by flushing metrics to standard output and exporting metrics using [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"} or flushing metrics to [Datadog extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} using Datadog SDK. + These metrics can be visualized through [Datadog console](https://app.datadoghq.com/metric/explore){target="_blank" rel="nofollow"}. 
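+
+???+ note "Sketch: choosing where metrics are flushed"
+    A minimal illustration of the two paths described above, based on the `flush_to_log` flag used throughout this patch series:
+
+    ```python
+    from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider
+
+    # flush_to_log=True: serialize metrics to standard output for the Datadog Forwarder
+    # flush_to_log=False (default): submit metrics through the Datadog SDK/extension
+    provider = DatadogProvider(flush_to_log=True)
+    metrics = DatadogMetrics(provider=provider)
+    ```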
@@ -131,7 +132,7 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr If you want to ensure at least one metric is always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator: ```python hl_lines="7" title="Raising SchemaValidationError exception if no metrics are added" ---8<-- "examples/metrics_datadog/src/raise_on_empty_metrics.py" +--8<-- "examples/metrics_datadog/src/raise_on_empty_datadog_metrics.py" ``` ???+ tip "Suppressing warning messages on empty metrics" @@ -144,7 +145,7 @@ You can optionally capture cold start metrics with `log_metrics` decorator via ` === "capture_cold_start_metric.py" ```python hl_lines="7" - --8<-- "examples/metrics_datadog/src/capture_cold_start_metric.py" + --8<-- "examples/metrics_datadog/src/capture_cold_start_datadog_metric.py" ``` === "capture_cold_start_metric_output.json" @@ -185,7 +186,7 @@ If you are using the AWS Lambda Web Adapter project, or a middleware with custom Contrary to the `log_metrics` decorator, you are now also responsible to flush metrics in the event of an exception. ```python hl_lines="17" title="Manually flushing and clearing metrics from memory" ---8<-- "examples/metrics_datadog/src/flush_metrics.py" +--8<-- "examples/metrics_datadog/src/flush_datadog_metrics.py" ``` ## Testing your code @@ -211,7 +212,7 @@ Make sure to set `POWERTOOLS_METRICS_NAMESPACE` before running your tests to pre `DatadogMetrics` keep metrics in memory across multiple instances. If you need to test this behavior, you can use the following Pytest fixture to ensure metrics are reset incl. cold start: ```python title="Clearing metrics between tests" ---8<-- "examples/metrics_datadog/src/clear_metrics_in_tests.py" +--8<-- "examples/metrics_datadog/src/clear_datadog_metrics_in_tests.py" ``` ### Functional testing diff --git a/examples/metrics_datadog/src/capture_cold_start_metric.py b/examples/metrics_datadog/src/capture_cold_start_datadog_metric.py similarity index 100% rename from examples/metrics_datadog/src/capture_cold_start_metric.py rename to examples/metrics_datadog/src/capture_cold_start_datadog_metric.py diff --git a/examples/metrics_datadog/src/clear_metrics_in_tests.py b/examples/metrics_datadog/src/clear_datadog_metrics_in_tests.py similarity index 100% rename from examples/metrics_datadog/src/clear_metrics_in_tests.py rename to examples/metrics_datadog/src/clear_datadog_metrics_in_tests.py diff --git a/examples/metrics_datadog/src/flush_metrics.py b/examples/metrics_datadog/src/flush_datadog_metrics.py similarity index 100% rename from examples/metrics_datadog/src/flush_metrics.py rename to examples/metrics_datadog/src/flush_datadog_metrics.py diff --git a/examples/metrics_datadog/src/raise_on_empty_metrics.py b/examples/metrics_datadog/src/raise_on_empty_datadog_metrics.py similarity index 100% rename from examples/metrics_datadog/src/raise_on_empty_metrics.py rename to examples/metrics_datadog/src/raise_on_empty_datadog_metrics.py From 15955e7495864942ec1f55dfc4ecf098797b2326 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 11 Aug 2023 15:18:17 +0100 Subject: [PATCH 12/37] Addressing Heitor's feedback + improving DX --- aws_lambda_powertools/metrics/functions.py | 56 ++++++++++ .../metrics/provider/datadog/datadog.py | 25 ++++- .../metrics/provider/datadog/metrics.py | 4 +- .../metrics/test_metrics_datadog.py | 13 ++- tests/unit/metrics/test_functions.py | 103 ++++++++++++++++++ 5 files changed, 192 insertions(+), 9 deletions(-) create mode 100644 
tests/unit/metrics/test_functions.py

diff --git a/aws_lambda_powertools/metrics/functions.py b/aws_lambda_powertools/metrics/functions.py
index ef57ee0fe6b..ebf40ab919a 100644
--- a/aws_lambda_powertools/metrics/functions.py
+++ b/aws_lambda_powertools/metrics/functions.py
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import re
 from typing import Any, Dict, List

 from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import (
@@ -73,6 +74,61 @@ def extract_cloudwatch_metric_unit_value(metric_units: List, metric_valid_option


 def serialize_datadog_tags(metric_tags: Dict[str, Any], default_tags: Dict[str, Any]) -> List[str]:
+    """
+    Serialize metric tags into a list of formatted strings for Datadog integration.
+
+    This function takes a dictionary of metric-specific tags or default tags.
+    It parses these tags and converts them into a list of strings in the format "tag_key:tag_value".
+
+    Parameters
+    ----------
+    metric_tags: Dict[str, Any]
+        A dictionary containing metric-specific tags.
+    default_tags: Dict[str, Any]
+        A dictionary containing default tags applicable to all metrics.
+
+    Returns:
+    -------
+    List[str]
+        A list of formatted tag strings, each in the "tag_key:tag_value" format.
+
+    Example:
+        >>> metric_tags = {'environment': 'production', 'service': 'web'}
+        >>> serialize_datadog_tags(metric_tags, None)
+        ['environment:production', 'service:web']
+    """
     tags = metric_tags or default_tags

     return [f"{tag_key}:{tag_value}" for tag_key, tag_value in tags.items()]
+
+
+def validate_datadog_metric_name(metric_name: str) -> bool:
+    """
+    Validate a metric name according to specific requirements.
+
+    Metric names must start with a letter.
+    Metric names must only contain ASCII alphanumerics, underscores, and periods.
+    Other characters, including spaces, are converted to underscores.
+    Unicode is not supported.
+    Metric names must not exceed 200 characters. Fewer than 100 is preferred from a UI perspective.
+
+    More information here: https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics
+
+    Parameters:
+    ----------
+    metric_name: str
+        The metric name to be validated.
+
+    Returns:
+    -------
+    bool
+        True if the metric name is valid, False otherwise.
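+
+    Example:
+        >>> # doctest-style sketch, mirroring the unit tests added in this patch
+        >>> validate_datadog_metric_name("metric_powertools")
+        True
+        >>> validate_datadog_metric_name("1_metric")
+        False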
+ """ + + # Check if the metric name starts with a letter + # Check if the metric name contains more than 200 characters + # Check if the resulting metric name only contains ASCII alphanumerics, underscores, and periods + if not metric_name[0].isalpha() or len(metric_name) > 200 or not re.match(r"^[a-zA-Z0-9_.]+$", metric_name): + return False + + return True diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index 98d5ea835c8..e3d47c35b8b 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -9,7 +9,7 @@ from typing import Any, Callable, Dict, List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.functions import serialize_datadog_tags +from aws_lambda_powertools.metrics.functions import serialize_datadog_tags, validate_datadog_metric_name from aws_lambda_powertools.metrics.provider import BaseProvider from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice @@ -50,12 +50,13 @@ def __init__( metric_set: List | None = None, namespace: str | None = None, flush_to_log: bool | None = None, - default_tags: Dict | None = None, + default_tags: Dict[str, Any] | None = None, ): self.metric_set = metric_set if metric_set is not None else [] - self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV)) - if self.namespace is None: - self.namespace = DEFAULT_NAMESPACE + self.namespace = ( + resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV)) + or DEFAULT_NAMESPACE + ) self.default_tags = default_tags or {} self.flush_to_log = resolve_env_var_choice(choice=flush_to_log, env=os.getenv(constants.DATADOG_FLUSH_TO_LOG)) @@ -96,12 +97,22 @@ def add_metric( >>> sales='sam' >>> ) """ + + # validating metric name + if not validate_datadog_metric_name(name): + docs = "https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics" + raise SchemaValidationError( + f"Invalid metric name. Please ensure the metric {name} follows the requirements. 
\n"
+                f"See Datadog documentation here: \n {docs}",
+            )
+
         if not isinstance(value, numbers.Real):
             raise MetricValueError(f"{value} is not a valid number")

         if not timestamp:
             timestamp = int(time.time())

+        logger.debug({"details": "Appending metric", "metrics": name})
         self.metric_set.append({"m": name, "v": value, "e": timestamp, "t": tags})

     def serialize_metric_set(self, metrics: List | None = None) -> List:
@@ -141,6 +152,7 @@ def serialize_metric_set(self, metrics: List | None = None) -> List:
                 metric_name = f"{self.namespace}.{single_metric['m']}"
             else:
                 metric_name = single_metric["m"]
+
             output_list.append(
                 {
                     "m": metric_name,
@@ -171,6 +183,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
             )

         else:
+            logger.debug("Flushing existing metrics")
             metrics = self.serialize_metric_set()
             # submit through datadog extension
             if lambda_metric and not self.flush_to_log:
@@ -188,7 +201,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
                 for metric_item in metrics:
                     print(json.dumps(metric_item, separators=(",", ":")))

-            self.clear_metrics()
+        self.clear_metrics()

     def clear_metrics(self):
         logger.debug("Clearing out existing metric set from memory")
diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py
index 91c5d59191c..e82ab5df3fa 100644
--- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py
+++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py
@@ -53,7 +53,7 @@ def lambda_handler():
     # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't
     # Result: ProductCreated is created twice as we now have 2 different EMF blobs
     _metrics: List = []
-    _default_tags: Dict = {}
+    _default_tags: Dict[str, Any] = {}

     def __init__(
         self,
@@ -93,7 +93,7 @@ def log_metrics(
         lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None,
         capture_cold_start_metric: bool = False,
         raise_on_empty_metrics: bool = False,
-        default_tags: Dict | None = None,
+        default_tags: Dict[str, Any] | None = None,
     ):
         return self.provider.log_metrics(
             lambda_handler=lambda_handler,
diff --git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py
index dd8f0b02112..63f5d648091 100644
--- a/tests/functional/metrics/test_metrics_datadog.py
+++ b/tests/functional/metrics/test_metrics_datadog.py
@@ -47,7 +47,7 @@ def test_datadog_write_to_log_with_env_variable(capsys, monkeypatch):
     assert logs == json.loads('{"m":"item_sold","v":1,"e":"","t":["product:latte","order:online"]}')


-def test_datadog_with_invalid_value():
+def test_datadog_with_invalid_metric_value():
     # GIVEN DatadogMetrics is initialized
     metrics = DatadogMetrics()

@@ -58,6 +58,17 @@ metrics.add_metric(name="item_sold", value="a", tags=["product:latte", "order:online"])


+def test_datadog_with_invalid_metric_name():
+    # GIVEN DatadogMetrics is initialized
+    metrics = DatadogMetrics()
+
+    # WHEN we add a metric whose name starts with a number
+    # and then attempt to serialize it
+    # THEN it should fail validation and raise SchemaValidationError
+    with pytest.raises(SchemaValidationError, match="Invalid metric name.*"):
+        metrics.add_metric(name="1_item_sold", value="a", tags=["product:latte", "order:online"])
+
+
 def test_datadog_raise_on_empty():
     # GIVEN DatadogMetrics is initialized
     metrics = DatadogMetrics()
diff --git a/tests/unit/metrics/test_functions.py
b/tests/unit/metrics/test_functions.py
new file mode 100644
index 00000000000..7d8df43893b
--- /dev/null
+++ b/tests/unit/metrics/test_functions.py
@@ -0,0 +1,103 @@
+import pytest
+
+from aws_lambda_powertools.metrics.functions import (
+    extract_cloudwatch_metric_resolution_value,
+    extract_cloudwatch_metric_unit_value,
+    serialize_datadog_tags,
+    validate_datadog_metric_name,
+)
+from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import (
+    MetricResolutionError,
+    MetricUnitError,
+)
+from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit
+
+
+def test_extract_invalid_cloudwatch_metric_resolution_value():
+    metric_resolutions = [resolution.value for resolution in MetricResolution]
+
+    # GIVEN an invalid EMF resolution value
+    resolution = 2
+
+    # WHEN we try to extract this value
+    # THEN it must fail with MetricResolutionError
+    with pytest.raises(MetricResolutionError, match="Invalid metric resolution.*"):
+        extract_cloudwatch_metric_resolution_value(metric_resolutions, resolution=resolution)
+
+
+def test_extract_valid_cloudwatch_metric_resolution_value():
+    metric_resolutions = [resolution.value for resolution in MetricResolution]
+
+    # GIVEN a valid EMF resolution value
+    resolution = 1
+
+    # WHEN we try to extract this value
+    extracted_resolution_value = extract_cloudwatch_metric_resolution_value(metric_resolutions, resolution=resolution)
+
+    # THEN the value must be extracted
+    assert extracted_resolution_value == resolution
+
+
+def test_extract_invalid_cloudwatch_metric_unit_value():
+    metric_units = [unit.value for unit in MetricUnit]
+    metric_unit_valid_options = list(MetricUnit.__members__)
+
+    # GIVEN an invalid EMF unit value
+    unit = "Fake"
+
+    # WHEN we try to extract this value
+    # THEN it must fail with MetricUnitError
+    with pytest.raises(MetricUnitError, match="Invalid metric unit.*"):
+        extract_cloudwatch_metric_unit_value(metric_units, metric_unit_valid_options, unit=unit)
+
+
+def test_extract_valid_cloudwatch_metric_unit_value():
+    metric_units = [unit.value for unit in MetricUnit]
+    metric_unit_valid_options = list(MetricUnit.__members__)
+
+    # GIVEN a valid EMF unit value
+    unit = "Count"
+
+    # WHEN we try to extract this value
+    extracted_unit_value = extract_cloudwatch_metric_unit_value(metric_units, metric_unit_valid_options, unit=unit)
+
+    # THEN the value must be extracted
+    assert extracted_unit_value == unit
+
+
+def test_serialize_datadog_tags():
+    # GIVEN a dict
+    tags = {"project": "powertools", "language": "python310"}
+    default_tags = {"project": "powertools-for-lambda", "language": "python311"}
+
+    # WHEN we serialize tags
+    tags_output = serialize_datadog_tags(tags, None)
+
+    # WHEN we serialize default tags
+    default_tags_output = serialize_datadog_tags(None, default_tags)
+
+    # THEN output must be a list
+    assert tags_output == ["project:powertools", "language:python310"]
+    assert default_tags_output == ["project:powertools-for-lambda", "language:python311"]
+
+
+def test_invalid_datadog_metric_name():
+    # GIVEN three metric names that are invalid in different ways
+    metric_1 = "1_metric"  # Metric name must not start with a number
+    metric_2 = "metric_ç"  # Metric name must not contain unicode characters
+    metric_3 = "".join(["x" for _ in range(201)])  # Metric name must not exceed 200 characters
+
+    # WHEN we try to validate those metric names
+    # THEN each must be False
+    assert validate_datadog_metric_name(metric_1) is False
+    assert validate_datadog_metric_name(metric_2) is False
+    assert
validate_datadog_metric_name(metric_3) is False + + +def test_valid_datadog_metric_name(): + # GIVEN a metric with a valid name + metric = "metric_powertools" # Metric name must not start with number + + # WHEN we try to validate those metrics names + # THEN must be True + assert validate_datadog_metric_name(metric) is True From a1c375459526837ed3f9d93973bd72ccee8ced10 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 11 Aug 2023 15:28:32 +0100 Subject: [PATCH 13/37] Fix documentantion and add python3.11 as default runtime --- docs/core/metrics.md | 2 +- docs/core/metrics/index.md | 7 +++++++ docs/core/metrics_datadog.md | 4 ++-- docs/index.md | 2 +- .../batch_processing/sam/dynamodb_batch_processing.yaml | 2 +- .../batch_processing/sam/kinesis_batch_processing.yaml | 2 +- examples/batch_processing/sam/sqs_batch_processing.yaml | 2 +- examples/idempotency/templates/sam.yaml | 2 +- examples/logger/sam/template.yaml | 2 +- examples/metrics/sam/template.yaml | 2 +- examples/metrics_datadog/sam/template.yaml | 2 +- examples/tracer/sam/template.yaml | 2 +- mkdocs.yml | 5 +++-- 13 files changed, 22 insertions(+), 14 deletions(-) create mode 100644 docs/core/metrics/index.md diff --git a/docs/core/metrics.md b/docs/core/metrics.md index e3a0676ff67..138ac8fdb62 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -261,7 +261,7 @@ By default it will skip all previously defined dimensions including default dime ### Flushing metrics manually -If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize, print metrics available to standard output, and clear in-memory metrics data. +If you are using the [AWS Lambda Web Adapter](https://github.com/awslabs/aws-lambda-web-adapter){target="_blank"} project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize, print metrics available to standard output, and clear in-memory metrics data. ???+ warning This does not capture Cold Start metrics, and metric data validation still applies. diff --git a/docs/core/metrics/index.md b/docs/core/metrics/index.md new file mode 100644 index 00000000000..051e9dd6e51 --- /dev/null +++ b/docs/core/metrics/index.md @@ -0,0 +1,7 @@ +--- +title: Metrics +description: Core utility +--- + + +--8<-- "docs/core/metrics.md" diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md index 94e44f99cf9..fdd0fb1d87b 100644 --- a/docs/core/metrics_datadog.md +++ b/docs/core/metrics_datadog.md @@ -36,7 +36,7 @@ Experiment to use your application or main service as the metric namespace to ea ### Install ???+ note - If you are using Datadog Forwarder, you can skip this step step. + If you are using Datadog Forwarder, you can skip this step. To adhere to Lambda best practices and effectively minimize the size of your development package, we recommend using the official Datadog layers built specifically for the SDK and extension components. Below is the template that demonstrates how to configure a SAM template with this information. @@ -178,7 +178,7 @@ The following environment variable is available to configure Metrics at a global ### Flushing metrics manually -If you are using the AWS Lambda Web Adapter project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize, print metrics available to standard output, and clear in-memory metrics data. 
+If you are using the [AWS Lambda Web Adapter](https://github.com/awslabs/aws-lambda-web-adapter){target="_blank"} project, or a middleware with custom metric logic, you can use `flush_metrics()`. This method will serialize, print metrics available to standard output, and clear in-memory metrics data. ???+ warning This does not capture Cold Start metrics, and metric data validation still applies. diff --git a/docs/index.md b/docs/index.md index 54a0f2c58ad..4ea82dd127c 100644 --- a/docs/index.md +++ b/docs/index.md @@ -673,7 +673,7 @@ Compared with the [public Layer ARN](#lambda-layer) option, SAR allows you to ch ## Quick getting started ```bash title="Hello world example using SAM CLI" -sam init --app-template hello-world-powertools-python --name sam-app --package-type Zip --runtime python3.10 --no-tracing +sam init --app-template hello-world-powertools-python --name sam-app --package-type Zip --runtime python3.11 --no-tracing ``` ## Features diff --git a/examples/batch_processing/sam/dynamodb_batch_processing.yaml b/examples/batch_processing/sam/dynamodb_batch_processing.yaml index 2ed70d65a86..4e436c083e5 100644 --- a/examples/batch_processing/sam/dynamodb_batch_processing.yaml +++ b/examples/batch_processing/sam/dynamodb_batch_processing.yaml @@ -6,7 +6,7 @@ Globals: Function: Timeout: 5 MemorySize: 256 - Runtime: python3.10 + Runtime: python3.11 Tracing: Active Environment: Variables: diff --git a/examples/batch_processing/sam/kinesis_batch_processing.yaml b/examples/batch_processing/sam/kinesis_batch_processing.yaml index 314d4f8c98f..6c80bd2f333 100644 --- a/examples/batch_processing/sam/kinesis_batch_processing.yaml +++ b/examples/batch_processing/sam/kinesis_batch_processing.yaml @@ -6,7 +6,7 @@ Globals: Function: Timeout: 5 MemorySize: 256 - Runtime: python3.10 + Runtime: python3.11 Tracing: Active Environment: Variables: diff --git a/examples/batch_processing/sam/sqs_batch_processing.yaml b/examples/batch_processing/sam/sqs_batch_processing.yaml index 77871c3478b..2dd827107d4 100644 --- a/examples/batch_processing/sam/sqs_batch_processing.yaml +++ b/examples/batch_processing/sam/sqs_batch_processing.yaml @@ -6,7 +6,7 @@ Globals: Function: Timeout: 5 MemorySize: 256 - Runtime: python3.10 + Runtime: python3.11 Tracing: Active Environment: Variables: diff --git a/examples/idempotency/templates/sam.yaml b/examples/idempotency/templates/sam.yaml index 8443a0914d7..7c2f65a6a4d 100644 --- a/examples/idempotency/templates/sam.yaml +++ b/examples/idempotency/templates/sam.yaml @@ -17,7 +17,7 @@ Resources: HelloWorldFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.10 + Runtime: python3.11 Handler: app.py Policies: - Statement: diff --git a/examples/logger/sam/template.yaml b/examples/logger/sam/template.yaml index a72b96f32e2..ddaa2f16407 100644 --- a/examples/logger/sam/template.yaml +++ b/examples/logger/sam/template.yaml @@ -5,7 +5,7 @@ Description: Powertools for AWS Lambda (Python) version Globals: Function: Timeout: 5 - Runtime: python3.10 + Runtime: python3.11 Tracing: Active Environment: Variables: diff --git a/examples/metrics/sam/template.yaml b/examples/metrics/sam/template.yaml index 50a2964bc4b..ace4c71f2e1 100644 --- a/examples/metrics/sam/template.yaml +++ b/examples/metrics/sam/template.yaml @@ -5,7 +5,7 @@ Description: Powertools for AWS Lambda (Python) version Globals: Function: Timeout: 5 - Runtime: python3.10 + Runtime: python3.11 Tracing: Active Environment: Variables: diff --git a/examples/metrics_datadog/sam/template.yaml 
b/examples/metrics_datadog/sam/template.yaml
index ebfc3ed2e36..a22ff131d55 100644
--- a/examples/metrics_datadog/sam/template.yaml
+++ b/examples/metrics_datadog/sam/template.yaml
@@ -5,7 +5,7 @@ Description: Powertools for AWS Lambda (Python) version
 Globals:
   Function:
     Timeout: 5
-    Runtime: python3.10
+    Runtime: python3.11
     Tracing: Active
     Environment:
       Variables:
diff --git a/examples/tracer/sam/template.yaml b/examples/tracer/sam/template.yaml
index 3eb6ef0acd0..d9e7d8a29da 100644
--- a/examples/tracer/sam/template.yaml
+++ b/examples/tracer/sam/template.yaml
@@ -5,7 +5,7 @@ Description: Powertools for AWS Lambda (Python) version
 Globals:
   Function:
     Timeout: 5
-    Runtime: python3.10
+    Runtime: python3.11
     Tracing: Active
     Environment:
       Variables:
diff --git a/mkdocs.yml b/mkdocs.yml
index 8c4667edd58..77da0221ad9 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -16,8 +16,9 @@ nav:
       - core/tracer.md
       - core/logger.md
       - Metrics:
-          - core/metrics.md
-          - core/metrics_datadog.md
+          - core/metrics/index.md
+          - Amazon CloudWatch EMF: core/metrics.md
+          - Datadog: core/metrics_datadog.md
       - Event Handler:
           - core/event_handler/api_gateway.md
           - core/event_handler/appsync.md
From 228001c15c5a8695908db76e9a25197616f06e66 Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Fri, 11 Aug 2023 16:39:23 +0100
Subject: [PATCH 14/37] Fix documentation

---
 docs/core/metrics_datadog.md               | 25 +++++++++++++++++--
 examples/metrics_datadog/sam/template.yaml | 11 ++++++--
 .../src/log_metrics_standard_output.json   |  8 ++++++
 3 files changed, 40 insertions(+), 4 deletions(-)
 create mode 100644 examples/metrics_datadog/src/log_metrics_standard_output.json

diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md
index fdd0fb1d87b..8bb71c5b869 100644
--- a/docs/core/metrics_datadog.md
+++ b/docs/core/metrics_datadog.md
@@ -3,9 +3,22 @@ title: Datadog
 description: Core utility
 ---
 
-Datadog provider creates custom metrics by flushing metrics to standard output and exporting metrics using [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"} or flushing metrics to [Datadog extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} using Datadog SDK.
+Datadog provider creates custom metrics by flushing metrics to [Datadog extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} using Datadog SDK. Alternatively you can flush metrics to standard output and exporting metrics using [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}
 
+```mermaid
+stateDiagram-v2
+    direction LR
+    LambdaCode: Lambda code with Powertools
+    DatadogSDK: Datadog SDK
+    DatadogExtension: Datadog Extension async
+    Datadog: Datadog Dashboard
+
+    LambdaCode --> DatadogSDK
+    DatadogSDK --> DatadogExtension
+    DatadogExtension --> Datadog
+```
+
 These metrics can be visualized through [Datadog console](https://app.datadoghq.com/metric/explore){target="_blank" rel="nofollow"}.
 
 ## Key features
@@ -78,6 +91,8 @@ Datadog offers the flexibility to configure tags per metric. To provider a bette
     --8<-- "examples/metrics_datadog/src/add_metrics_with_tags.py"
     ```
 
+We recommend [read](https://docs.datadoghq.com/getting_started/tagging/){target="_blank" rel="nofollow"} the official Datadog documentation for comprehensive insights into the best practices for effectively utilizing tags.
+ ### Adding default tags If you want to set the same tags for all metrics, you can use the `set_default_tags` method or the `default_tags` parameter in the `log_metrics` decorator and then persist tags across the Lambda invocations. @@ -99,7 +114,7 @@ If you'd like to remove them at some point, you can use `clear_default_tags` met --8<-- "examples/metrics_datadog/src/set_default_tags_log_metrics.py" ``` -### Flushing metrics to standard output +### Exporting to Datadog Log Forwarder You have the option to flush metrics to the standard output for exporting, which can then be seamlessly processed through the [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. @@ -109,6 +124,12 @@ You have the option to flush metrics to the standard output for exporting, which --8<-- "examples/metrics_datadog/src/flush_metrics_to_standard_output.py" ``` +=== "log_metrics_standard_output.json" + + ```json hl_lines="2 6 7" + --8<-- "examples/metrics_datadog/src/log_metrics_standard_output.json" + ``` + ### Flushing metrics As you finish adding all your metrics, you need to serialize and flush them to standard output. You can do that automatically with the `log_metrics` decorator. diff --git a/examples/metrics_datadog/sam/template.yaml b/examples/metrics_datadog/sam/template.yaml index a22ff131d55..f1d22b80f11 100644 --- a/examples/metrics_datadog/sam/template.yaml +++ b/examples/metrics_datadog/sam/template.yaml @@ -10,6 +10,9 @@ Globals: Environment: Variables: POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline + # Configuring the API_KEY and DD_SITE can be achieved through various methods. + # An functional approach involves utilizing these two environment variables. + # Further details can be found at: https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli DD_API_KEY: "" DD_SITE: datadoghq.com @@ -18,9 +21,13 @@ Globals: # https://docs.powertools.aws.dev/lambda/python/latest/#lambda-layer - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPythonV2:40 # Find the latest Layer version in the Datadog official documentation - # See https://github.com/DataDog/datadog-lambda-python/releases + + # You can use the Datadog layer to bring the Datadog SDK + # Further details can be found at: https://github.com/DataDog/datadog-lambda-python/releases - !Sub arn:aws:lambda:${AWS::Region}:464622532012:layer:Datadog-Python310:78 - # See https://github.com/DataDog/datadog-lambda-extension/releases + + # You can use the Datadog extension to export logs directly to Datadog + # Further details can be found at: https://github.com/DataDog/datadog-lambda-extension/releases - !Sub arn:aws:lambda:${AWS::Region}:464622532012:layer:Datadog-Extension:45 Resources: diff --git a/examples/metrics_datadog/src/log_metrics_standard_output.json b/examples/metrics_datadog/src/log_metrics_standard_output.json new file mode 100644 index 00000000000..35fcb8a096a --- /dev/null +++ b/examples/metrics_datadog/src/log_metrics_standard_output.json @@ -0,0 +1,8 @@ +{ + "m":"SuccessfulBooking", + "v":1, + "e":1691768022, + "t":[ + + ] +} From 4bdf4ff8f6ef79b4bfa8acfa10a71ab03cf8dea8 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 14 Aug 2023 12:53:27 +0100 Subject: [PATCH 15/37] Moving internal functions to Provider class --- aws_lambda_powertools/metrics/functions.py | 64 +---------------- .../metrics/provider/datadog/datadog.py | 69 ++++++++++++++++++- tests/unit/metrics/test_functions.py | 40 ----------- 3 files changed, 67 
insertions(+), 106 deletions(-) diff --git a/aws_lambda_powertools/metrics/functions.py b/aws_lambda_powertools/metrics/functions.py index ebf40ab919a..d951c0749a3 100644 --- a/aws_lambda_powertools/metrics/functions.py +++ b/aws_lambda_powertools/metrics/functions.py @@ -1,7 +1,6 @@ from __future__ import annotations -import re -from typing import Any, Dict, List +from typing import List from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import ( MetricResolutionError, @@ -71,64 +70,3 @@ def extract_cloudwatch_metric_unit_value(metric_units: List, metric_valid_option unit = unit.value return unit - - -def serialize_datadog_tags(metric_tags: Dict[str, Any], default_tags: Dict[str, Any]) -> List[str]: - """ - Serialize metric tags into a list of formatted strings for Datadog integration. - - This function takes a dictionary of metric-specific tags or default tags. - It parse these tags and converts them into a list of strings in the format "tag_key:tag_value". - - Parameters - ---------- - metric_tags: Dict[str, Any] - A dictionary containing metric-specific tags. - default_tags: Dict[str, Any] - A dictionary containing default tags applicable to all metrics. - - Returns: - ------- - List[str] - A list of formatted tag strings, each in the "tag_key:tag_value" format. - - Example: - >>> metric_tags = {'environment': 'production', 'service': 'web'} - >>> serialize_datadog_tags(metric_tags, None) - ['environment:production', 'service:web'] - """ - tags = metric_tags or default_tags - - return [f"{tag_key}:{tag_value}" for tag_key, tag_value in tags.items()] - - -def validate_datadog_metric_name(metric_name: str) -> bool: - """ - Validate a metric name according to specific requirements. - - Metric names must start with a letter. - Metric names must only contain ASCII alphanumerics, underscores, and periods. - Other characters, including spaces, are converted to underscores. - Unicode is not supported. - Metric names must not exceed 200 characters. Fewer than 100 is preferred from a UI perspective. - - More information here: https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics - - Parameters: - ---------- - metric_name: str - The metric name to be validated. - - Returns: - ------- - bool - True if the metric name is valid, False otherwise. 
- """ - - # Check if the metric name starts with a letter - # Check if the metric name contains more than 200 characters - # Check if the resulting metric name only contains ASCII alphanumerics, underscores, and periods - if not metric_name[0].isalpha() or len(metric_name) > 200 or not re.match(r"^[a-zA-Z0-9_.]+$", metric_name): - return False - - return True diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index e3d47c35b8b..e06c92abea3 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -4,17 +4,19 @@ import logging import numbers import os +import re import time import warnings from typing import Any, Callable, Dict, List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.functions import serialize_datadog_tags, validate_datadog_metric_name from aws_lambda_powertools.metrics.provider import BaseProvider from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice from aws_lambda_powertools.utilities.typing import LambdaContext +METRIC_NAME_REGEX = re.compile(r"^[a-zA-Z0-9_.]+$") + logger = logging.getLogger(__name__) # Check if using datadog layer @@ -99,7 +101,7 @@ def add_metric( """ # validating metric name - if not validate_datadog_metric_name(name): + if not self._validate_datadog_metric_name(name): docs = "https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics" raise SchemaValidationError( f"Invalid metric name. Please ensure the metric {name} follows the requirements. \n" @@ -158,7 +160,7 @@ def serialize_metric_set(self, metrics: List | None = None) -> List: "m": metric_name, "v": single_metric["v"], "e": single_metric["e"], - "t": serialize_datadog_tags(metric_tags=single_metric["t"], default_tags=self.default_tags), + "t": self._serialize_datadog_tags(metric_tags=single_metric["t"], default_tags=self.default_tags), }, ) @@ -293,3 +295,64 @@ def lambda_handler(): return True """ self.default_tags.update(**tags) + + @staticmethod + def _serialize_datadog_tags(metric_tags: Dict[str, Any], default_tags: Dict[str, Any]) -> List[str]: + """ + Serialize metric tags into a list of formatted strings for Datadog integration. + + This function takes a dictionary of metric-specific tags or default tags. + It parse these tags and converts them into a list of strings in the format "tag_key:tag_value". + + Parameters + ---------- + metric_tags: Dict[str, Any] + A dictionary containing metric-specific tags. + default_tags: Dict[str, Any] + A dictionary containing default tags applicable to all metrics. + + Returns: + ------- + List[str] + A list of formatted tag strings, each in the "tag_key:tag_value" format. + + Example: + >>> metric_tags = {'environment': 'production', 'service': 'web'} + >>> serialize_datadog_tags(metric_tags, None) + ['environment:production', 'service:web'] + """ + tags = metric_tags or default_tags + + return [f"{tag_key}:{tag_value}" for tag_key, tag_value in tags.items()] + + @staticmethod + def _validate_datadog_metric_name(metric_name: str) -> bool: + """ + Validate a metric name according to specific requirements. + + Metric names must start with a letter. + Metric names must only contain ASCII alphanumerics, underscores, and periods. + Other characters, including spaces, are converted to underscores. + Unicode is not supported. 
+ Metric names must not exceed 200 characters. Fewer than 100 is preferred from a UI perspective. + + More information here: https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics + + Parameters: + ---------- + metric_name: str + The metric name to be validated. + + Returns: + ------- + bool + True if the metric name is valid, False otherwise. + """ + + # Check if the metric name starts with a letter + # Check if the metric name contains more than 200 characters + # Check if the resulting metric name only contains ASCII alphanumerics, underscores, and periods + if not metric_name[0].isalpha() or len(metric_name) > 200 or not METRIC_NAME_REGEX.match(metric_name): + return False + + return True diff --git a/tests/unit/metrics/test_functions.py b/tests/unit/metrics/test_functions.py index 7d8df43893b..f3414720bba 100644 --- a/tests/unit/metrics/test_functions.py +++ b/tests/unit/metrics/test_functions.py @@ -3,8 +3,6 @@ from aws_lambda_powertools.metrics.functions import ( extract_cloudwatch_metric_resolution_value, extract_cloudwatch_metric_unit_value, - serialize_datadog_tags, - validate_datadog_metric_name, ) from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import ( MetricResolutionError, @@ -63,41 +61,3 @@ def test_extract_valid_cloudwatch_metric_unit_value(): # THEN value must be extracted assert extracted_unit_value == unit - - -def test_serialize_datadog_tags(): - # GIVEN a dict - tags = {"project": "powertools", "language": "python310"} - default_tags = {"project": "powertools-for-lambda", "language": "python311"} - - # WHEN we serialize tags - tags_output = serialize_datadog_tags(tags, None) - - # WHEN we serialize default tags - default_tags_output = serialize_datadog_tags(None, default_tags) - - # THEN output must be a list - assert tags_output == ["project:powertools", "language:python310"] - assert default_tags_output == ["project:powertools-for-lambda", "language:python311"] - - -def test_invalid_datadog_metric_name(): - # GIVEN three metrics names with different invalid names - metric_1 = "1_metric" # Metric name must not start with number - metric_2 = "metric_ç" # Metric name must not contains unicode characters - metric_3 = "".join(["x" for _ in range(201)]) # Metric name must have less than 200 characters - - # WHEN we try to validate those metrics names - # THEN must be False - assert validate_datadog_metric_name(metric_1) is False - assert validate_datadog_metric_name(metric_2) is False - assert validate_datadog_metric_name(metric_3) is False - - -def test_valid_datadog_metric_name(): - # GIVEN a metric with a valid name - metric = "metric_powertools" # Metric name must not start with number - - # WHEN we try to validate those metrics names - # THEN must be True - assert validate_datadog_metric_name(metric) is True From d8b84de4d77014939d5fc95cf4e78e307fa91b83 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 14 Aug 2023 13:07:48 +0100 Subject: [PATCH 16/37] Adding more information about partners --- docs/core/metrics_datadog.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics_datadog.md index 8bb71c5b869..329b0653c32 100644 --- a/docs/core/metrics_datadog.md +++ b/docs/core/metrics_datadog.md @@ -2,6 +2,9 @@ title: Datadog description: Core utility --- + +In this context, a metric provider is an [AWS Lambda Partner](https://go.aws/3HtU6CZ){target="_blank" rel="nofollow"} that provides an integration via SDK where Powertools for AWS Lambda (Python) can 
create a wrapper around this one. If you are an AWS Lambda partner and would like to add support in Powertools for AWS Lambda (Python), open an [issue](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2Ctriage&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"}. + Datadog provider creates custom metrics by flushing metrics to [Datadog extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} using Datadog SDK. Alternatively you can flush metrics to standard output and exporting metrics using [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"} @@ -9,9 +12,9 @@ Datadog provider creates custom metrics by flushing metrics to [Datadog extensio ```mermaid stateDiagram-v2 direction LR - LambdaCode: Lambda code with Powertools + LambdaCode: Lambda code with Powertools for AWS Lambda DatadogSDK: Datadog SDK - DatadogExtension: Datadog Extension async + DatadogExtension: Datadog Lambda Extension Datadog: Datadog Dashboard LambdaCode --> DatadogSDK From 724a2e7f997ae9cb30a7739c3814e0b8eef6a45b Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 14:40:16 +0200 Subject: [PATCH 17/37] docs(config): collapse by default given nav size --- mkdocs.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index 77da0221ad9..72ab2d80cba 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -60,7 +60,6 @@ theme: features: - header.autohide - navigation.sections - - navigation.expand - navigation.top - navigation.instant - navigation.indexes From 5e4593c62d69bba98983a0acfaa9f0e38dfa536a Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 15:00:27 +0200 Subject: [PATCH 18/37] docs: fix yaml frontmatter issue Signed-off-by: heitorlessa --- .markdownlintignore | 1 + docs/core/metrics/index.md | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 .markdownlintignore diff --git a/.markdownlintignore b/.markdownlintignore new file mode 100644 index 00000000000..e477d879941 --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1 @@ +docs/core/metrics/index.md diff --git a/docs/core/metrics/index.md b/docs/core/metrics/index.md index 051e9dd6e51..359ce28eb33 100644 --- a/docs/core/metrics/index.md +++ b/docs/core/metrics/index.md @@ -2,6 +2,5 @@ title: Metrics description: Core utility --- - --8<-- "docs/core/metrics.md" From 5e95e5676b47315d5e10bd3f81c5fd8c00f2ccf3 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 15:30:08 +0200 Subject: [PATCH 19/37] docs: auto-include abbreviations Signed-off-by: heitorlessa --- .markdownlintignore | 1 + docs/core/metrics.md | 14 +++++++------- includes/abbreviations.md | 1 + mkdocs.yml | 3 +++ 4 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 includes/abbreviations.md diff --git a/.markdownlintignore b/.markdownlintignore index e477d879941..11b6d7ffe29 100644 --- a/.markdownlintignore +++ b/.markdownlintignore @@ -1 +1,2 @@ docs/core/metrics/index.md +includes/abbreviations.md diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 138ac8fdb62..af1fce68e34 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -1,5 +1,5 @@ --- -title: CloudWatch EMF +title: Amazon CloudWatch EMF Metrics description: Core utility --- @@ -197,9 +197,9 @@ This has the advantage of keeping cold start metric separate from your applicati The following environment variable is available to 
configure Metrics at a global scope: -| Setting | Description | Environment variable | Default | -|--------------------|------------------------------------------------------------------------------|-----------------------------------------|---------| -| **Namespace Name** | Sets namespace used for metrics. | `POWERTOOLS_METRICS_NAMESPACE` | `None` | +| Setting | Description | Environment variable | Default | +| ------------------ | -------------------------------- | ------------------------------ | ------- | +| **Namespace Name** | Sets namespace used for metrics. | `POWERTOOLS_METRICS_NAMESPACE` | `None` | `POWERTOOLS_METRICS_NAMESPACE` is also available on a per-instance basis with the `namespace` parameter, which will consequently override the environment variable value. @@ -286,9 +286,9 @@ You can use `EphemeralMetrics` class when looking to isolate multiple instances `EphemeralMetrics` has only one difference while keeping nearly the exact same set of features: -| Feature | Metrics | EphemeralMetrics | -| ----------------------------------------------------------------------------------------------------------- | ------- | ---------------- | -| **Share data across instances** (metrics, dimensions, metadata, etc.) | Yes | - | +| Feature | Metrics | EphemeralMetrics | +| --------------------------------------------------------------------- | ------- | ---------------- | +| **Share data across instances** (metrics, dimensions, metadata, etc.) | Yes | - | !!! question "Why not changing the default `Metrics` behaviour to not share data across instances?" diff --git a/includes/abbreviations.md b/includes/abbreviations.md new file mode 100644 index 00000000000..ed52b93fe64 --- /dev/null +++ b/includes/abbreviations.md @@ -0,0 +1 @@ +*[observability provider]: An AWS Lambda Observability Partner diff --git a/mkdocs.yml b/mkdocs.yml index 72ab2d80cba..55d02e1f008 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -75,6 +75,7 @@ theme: markdown_extensions: - admonition + - abbr - pymdownx.tabbed: alternate_style: true - pymdownx.highlight: @@ -84,6 +85,8 @@ markdown_extensions: base_path: "." check_paths: true restrict_base_path: false + auto_append: + - includes/abbreviations.md - meta - toc: permalink: true From 6e841c7f7c135968d6c79ac59862a5a37f6253bd Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 16:06:30 +0200 Subject: [PATCH 20/37] docs(nav): move datadog to its own nav --- .../datadog.md} | 33 +++++++++++-------- mkdocs.yml | 2 +- 2 files changed, 21 insertions(+), 14 deletions(-) rename docs/core/{metrics_datadog.md => metrics/datadog.md} (88%) diff --git a/docs/core/metrics_datadog.md b/docs/core/metrics/datadog.md similarity index 88% rename from docs/core/metrics_datadog.md rename to docs/core/metrics/datadog.md index 329b0653c32..4666bbf14c0 100644 --- a/docs/core/metrics_datadog.md +++ b/docs/core/metrics/datadog.md @@ -1,27 +1,34 @@ --- title: Datadog -description: Core utility +description: Metrics provider --- -In this context, a metric provider is an [AWS Lambda Partner](https://go.aws/3HtU6CZ){target="_blank" rel="nofollow"} that provides an integration via SDK where Powertools for AWS Lambda (Python) can create a wrapper around this one. If you are an AWS Lambda partner and would like to add support in Powertools for AWS Lambda (Python), open an [issue](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2Ctriage&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"}. 
- -Datadog provider creates custom metrics by flushing metrics to [Datadog extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"} using Datadog SDK. Alternatively you can flush metrics to standard output and exporting metrics using [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"} +This observability provider creates custom metrics by flushing metrics to [Datadog Lambda extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"}, or to standard output via [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. ```mermaid stateDiagram-v2 direction LR - LambdaCode: Lambda code with Powertools for AWS Lambda + LambdaFn: Your Lambda function + LambdaCode: DatadogMetrics DatadogSDK: Datadog SDK DatadogExtension: Datadog Lambda Extension Datadog: Datadog Dashboard + LambdaExtension: Lambda Extension + LambdaFn --> LambdaCode LambdaCode --> DatadogSDK DatadogSDK --> DatadogExtension - DatadogExtension --> Datadog + + state LambdaExtension { + DatadogExtension --> Datadog: async + } + ``` +In this context, a metric provider is an [AWS Lambda Partner](https://go.aws/3HtU6CZ){target="_blank" rel="nofollow"} that provides an integration via SDK where Powertools for AWS Lambda (Python) can create a wrapper around this one. If you are an AWS Lambda partner and would like to add support in Powertools for AWS Lambda (Python), open an [issue](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2Ctriage&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"}. + These metrics can be visualized through [Datadog console](https://app.datadoghq.com/metric/explore){target="_blank" rel="nofollow"}. ## Key features @@ -42,10 +49,10 @@ If you're new to Datadog custom metrics, we suggest you read the Datadog [offici Datadog provider has two global settings that will be used across all metrics emitted: -| Setting | Description | Environment variable | Constructor parameter | -| -------------------- | ------------------------------------------------------------------------------- | ------------------------------ | --------------------- | -| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` | -| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder| `DD_FLUSH_TO_LOG` | `flush_to_log` | +| Setting | Description | Environment variable | Constructor parameter | +| -------------------- | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- | +| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` | +| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` | Experiment to use your application or main service as the metric namespace to easily group all metrics. 
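The settings table above maps directly onto the `DatadogMetrics` constructor this series introduces. As a quick orientation, here is a minimal sketch of how the two settings combine, assuming the import path used by the example files in these patches (the namespace value and handler are illustrative):

```python
from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext

# Both settings can also come from the POWERTOOLS_METRICS_NAMESPACE and
# DD_FLUSH_TO_LOG environment variables listed in the table above.
metrics = DatadogMetrics(namespace="ServerlessAirline", flush_to_log=True)


@metrics.log_metrics  # ensures metrics are flushed upon request completion/failure
def lambda_handler(event: dict, context: LambdaContext):
    metrics.add_metric(name="SuccessfulBooking", value=1)
```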
@@ -192,9 +199,9 @@ This has the advantage of keeping cold start metric separate from your applicati The following environment variable is available to configure Metrics at a global scope: -| Setting | Description | Environment variable | Default | -|--------------------|------------------------------------------------------------------------------|-----------------------------------------|---------| -| **Namespace Name** | Sets namespace used for metrics. | `POWERTOOLS_METRICS_NAMESPACE` | `None` | +| Setting | Description | Environment variable | Default | +| ------------------ | -------------------------------- | ------------------------------ | ------- | +| **Namespace Name** | Sets namespace used for metrics. | `POWERTOOLS_METRICS_NAMESPACE` | `None` | `POWERTOOLS_METRICS_NAMESPACE` is also available on a per-instance basis with the `namespace` parameter, which will consequently override the environment variable value. diff --git a/mkdocs.yml b/mkdocs.yml index 55d02e1f008..1b9f4545239 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -18,7 +18,7 @@ nav: - Metrics: - core/metrics/index.md - Amazon CloudWatch EMF: core/metrics.md - - Datadog: core/metrics_datadog.md + - Datadog: core/metrics/datadog.md - Event Handler: - core/event_handler/api_gateway.md - core/event_handler/appsync.md From 0467e509d3f1847fc74c3f9f4365d2e3d0924884 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 16:29:54 +0200 Subject: [PATCH 21/37] docs(datadog): provide terminologies; feat cleanup Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 4666bbf14c0..24914e4c18e 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -4,7 +4,7 @@ description: Metrics provider --- -This observability provider creates custom metrics by flushing metrics to [Datadog Lambda extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"}, or to standard output via [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. +This observability provider creates custom metrics by flushing metrics to [Datadog Lambda extension](https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli){target="_blank" rel="nofollow"}, or to standard output via [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. These metrics can be visualized in the [Datadog console](https://app.datadoghq.com/metric/explore){target="_blank" rel="nofollow"}. ```mermaid @@ -27,20 +27,21 @@ stateDiagram-v2 ``` -In this context, a metric provider is an [AWS Lambda Partner](https://go.aws/3HtU6CZ){target="_blank" rel="nofollow"} that provides an integration via SDK where Powertools for AWS Lambda (Python) can create a wrapper around this one. If you are an AWS Lambda partner and would like to add support in Powertools for AWS Lambda (Python), open an [issue](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2Ctriage&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"}. - -These metrics can be visualized through [Datadog console](https://app.datadoghq.com/metric/explore){target="_blank" rel="nofollow"}. 
- ## Key features -* Flush metrics to standard output -* Flush metrics to Datadog extension -* Validate against common metric definitions mistakes (values) -* Support to add default tags to all created metrics +* Flush metrics to Datadog extension or standard output +* Validate against common metric definitions mistakes +* Support to add default tags ## Terminologies -If you're new to Datadog custom metrics, we suggest you read the Datadog [official documentation](https://docs.datadoghq.com/metrics/custom_metrics/){target="_blank" rel="nofollow"} for custom metrics. +If you're new to Datadog Metrics, there are three terminologies you must be aware of before using this utility: + +* **Namespace**. It's the highest level container that will group multiple metrics from multiple services for a given application, for example `ServerlessEcommerce`. +* **Metric**. It's the name of the metric, for example: SuccessfulBooking or UpdatedBooking. +* **Tags**. Metrics metadata in key-value pair format. They help provide contextual information, and filter org organize metrics. + +You can read more details in the [Datadog official documentation](https://docs.datadoghq.com/metrics/custom_metrics/){target="_blank" rel="nofollow"}. ## Getting started From 7c39ded456fe8cd49b4a42b3d0343a39acb243b7 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 16:30:08 +0200 Subject: [PATCH 22/37] docs(metrics): correct typo in terminologies Signed-off-by: heitorlessa --- docs/core/metrics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index af1fce68e34..cf41871ec66 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -16,7 +16,7 @@ These metrics can be visualized through [Amazon CloudWatch Console](https://cons ## Terminologies -If you're new to Amazon CloudWatch, there are two terminologies you must be aware of before using this utility: +If you're new to Amazon CloudWatch, there are five terminologies you must be aware of before using this utility: * **Namespace**. It's the highest level container that will group multiple metrics from multiple services for a given application, for example `ServerlessEcommerce`. * **Dimensions**. Metrics metadata in key-value format. They help you slice and dice metrics visualization, for example `ColdStart` metric by Payment `service`. 
From ed7a56774f87113f4d5cec0d3a4f03a52ea4e823 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 16:49:24 +0200 Subject: [PATCH 23/37] shorten word Signed-off-by: heitorlessa --- examples/metrics_datadog/sam/template.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/metrics_datadog/sam/template.yaml b/examples/metrics_datadog/sam/template.yaml index f1d22b80f11..37689bb638b 100644 --- a/examples/metrics_datadog/sam/template.yaml +++ b/examples/metrics_datadog/sam/template.yaml @@ -22,12 +22,12 @@ Globals: - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPythonV2:40 # Find the latest Layer version in the Datadog official documentation - # You can use the Datadog layer to bring the Datadog SDK - # Further details can be found at: https://github.com/DataDog/datadog-lambda-python/releases + # Datadog SDK + # Latest versions: https://github.com/DataDog/datadog-lambda-python/releases - !Sub arn:aws:lambda:${AWS::Region}:464622532012:layer:Datadog-Python310:78 - # You can use the Datadog extension to export logs directly to Datadog - # Further details can be found at: https://github.com/DataDog/datadog-lambda-extension/releases + # Datadog Lambda Extension + # Latest versions: https://github.com/DataDog/datadog-lambda-extension/releases - !Sub arn:aws:lambda:${AWS::Region}:464622532012:layer:Datadog-Extension:45 Resources: From eb1ee5d008a203024e691da51bab98e6952205e1 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 16:56:38 +0200 Subject: [PATCH 24/37] docs(datadog): shorten install Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 24914e4c18e..6a6968c4fc3 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -55,21 +55,18 @@ Datadog provider has two global settings that will be used across all metrics em | **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` | | **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` | -Experiment to use your application or main service as the metric namespace to easily group all metrics. - ### Install -???+ note - If you are using Datadog Forwarder, you can skip this step. +> **Using Datadog Forwarder?** You can skip this step. -To adhere to Lambda best practices and effectively minimize the size of your development package, we recommend using the official Datadog layers built specifically for the SDK and extension components. Below is the template that demonstrates how to configure a SAM template with this information. +We recommend using Datadog SDK and Datadog Lambda Extension with this feature for optimal results. -```yaml hl_lines="13 14 22 24" title="AWS Serverless Application Model (SAM) example" +For Datadog SDK, you can add `aws-lambda-powertools[datadog]` as a dependency in your preferred tool, or as a Lambda Layer in the following example: + +```yaml hl_lines="16-17 27 31" title="AWS Serverless Application Model (SAM) example" --8<-- "examples/metrics_datadog/sam/template.yaml" ``` -If you prefer not to utilize the Datadog SDK provided through the Datadog layer, add `aws-lambda-powertools[datadog]` as a dependency in your preferred tool: _e.g._, _requirements.txt_, _pyproject.toml_. 
This will ensure you have the required dependencies before using Datadog provider. - ### Creating metrics You can create metrics using `add_metric`. Optional parameter such as timestamp can be included, but if not provided, the Datadog Provider will automatically use the current timestamp by default. From de11c5c747bc164bfd288fa597bde3b4cb91d736 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 17:14:13 +0200 Subject: [PATCH 25/37] docs(datadog): simplify add_metrics Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 17 ++++++++--------- ..._metrics_with_provider.py => add_metrics.py} | 9 +++------ ...rovider.py => add_metrics_with_timestamp.py} | 0 3 files changed, 11 insertions(+), 15 deletions(-) rename examples/metrics_datadog/src/{add_metrics_with_provider.py => add_metrics.py} (57%) rename examples/metrics_datadog/src/{add_metrics_without_provider.py => add_metrics_with_timestamp.py} (100%) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 6a6968c4fc3..cf5ec99b386 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -69,21 +69,20 @@ For Datadog SDK, you can add `aws-lambda-powertools[datadog]` as a dependency in ### Creating metrics -You can create metrics using `add_metric`. Optional parameter such as timestamp can be included, but if not provided, the Datadog Provider will automatically use the current timestamp by default. +You can create metrics using `add_metric`. -???+ tip - You can initialize DadatadogMetrics in any other module too. It'll keep track of your aggregate metrics in memory to optimize costs (one blob instead of multiples). +By default, we will generate the current timestamp for you. Alternatively, you can use the `timestamp` parameter to set a custom one in epoch time. -=== "add_metrics_with_provider.py" +=== "add_metrics.py" - ```python hl_lines="6 12" - --8<-- "examples/metrics_datadog/src/add_metrics_with_provider.py" + ```python hl_lines="4 7 9" + --8<-- "examples/metrics_datadog/src/add_metrics.py" ``` -=== "add_metrics_without_provider.py" +=== "add_metrics_with_timestamp.py" ```python hl_lines="11" - --8<-- "examples/metrics_datadog/src/add_metrics_without_provider.py" + --8<-- "examples/metrics_datadog/src/add_metrics_with_timestamp.py" ``` ???+ warning "Warning: Do not create metrics outside the handler" @@ -257,7 +256,7 @@ You can read standard output and assert whether metrics have been flushed. 
Here' === "add_metrics.py" ```python - --8<-- "examples/metrics_datadog/src/add_metrics_without_provider.py" + --8<-- "examples/metrics_datadog/src/add_metrics.py" ``` ???+ tip diff --git a/examples/metrics_datadog/src/add_metrics_with_provider.py b/examples/metrics_datadog/src/add_metrics.py similarity index 57% rename from examples/metrics_datadog/src/add_metrics_with_provider.py rename to examples/metrics_datadog/src/add_metrics.py index 9e90b9f41cf..6fe6774152e 100644 --- a/examples/metrics_datadog/src/add_metrics_with_provider.py +++ b/examples/metrics_datadog/src/add_metrics.py @@ -1,12 +1,9 @@ -import time - -from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics from aws_lambda_powertools.utilities.typing import LambdaContext -provider = DatadogProvider() -metrics = DatadogMetrics(provider=provider) +metrics = DatadogMetrics() @metrics.log_metrics # ensures metrics are flushed upon request completion/failure def lambda_handler(event: dict, context: LambdaContext): - metrics.add_metric(name="SuccessfulBooking", value=1, timestamp=int(time.time())) + metrics.add_metric(name="SuccessfulBooking", value=1) diff --git a/examples/metrics_datadog/src/add_metrics_without_provider.py b/examples/metrics_datadog/src/add_metrics_with_timestamp.py similarity index 100% rename from examples/metrics_datadog/src/add_metrics_without_provider.py rename to examples/metrics_datadog/src/add_metrics_with_timestamp.py From 28673c52c00bc1caa7b2af56468e9367ee4a2fc3 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 17:35:20 +0200 Subject: [PATCH 26/37] docs(datadog): simplify tags, mention new warning Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index cf5ec99b386..528b668e081 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -90,7 +90,9 @@ By default, we will generate the current timestamp for you. Alternatively, you c ### Adding tags -Datadog offers the flexibility to configure tags per metric. To provider a better experience for our customers, you can pass an arbitrary number of keyword arguments (kwargs) that can be user as a tag. +You can add any number of tags to your metrics via keyword arguments (`key=value`). They are helpful to filter, organize, and aggregate your metrics later. + +!!! info "We will emit a warning for tags [beyond the 200 chars limit](https://docs.datadoghq.com/getting_started/tagging/){target="_blank" rel="nofollow"}." === "add_metrics_with_tags.py" @@ -98,8 +100,6 @@ Datadog offers the flexibility to configure tags per metric. To provider a bette --8<-- "examples/metrics_datadog/src/add_metrics_with_tags.py" ``` -We recommend [read](https://docs.datadoghq.com/getting_started/tagging/){target="_blank" rel="nofollow"} the official Datadog documentation for comprehensive insights into the best practices for effectively utilizing tags. - ### Adding default tags If you want to set the same tags for all metrics, you can use the `set_default_tags` method or the `default_tags` parameter in the `log_metrics` decorator and then persist tags across the Lambda invocations. 
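The `add_metrics_with_tags.py` example above is pulled in via a `--8<--` include rather than reproduced in the patch. A plausible sketch of what it demonstrates, with illustrative tag names taken from the tests later in this series:

```python
from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext

metrics = DatadogMetrics()


@metrics.log_metrics  # ensures metrics are flushed upon request completion/failure
def lambda_handler(event: dict, context: LambdaContext):
    # Tags are plain keyword arguments; they serialize to "key:value" strings,
    # e.g. ["product:latte", "order:online"]
    metrics.add_metric(name="SuccessfulBooking", value=1, product="latte", order="online")
```

The 200-character warning mentioned in the info box is implemented later in this series (`Adding tags validation + tests`), where an oversized `key:value` pair emits a `DatadogDataValidationWarning` instead of failing the invocation.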
From 7888c384c19cd12bf92b5a09a27f3859fc9c3c3d Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 17:42:17 +0200 Subject: [PATCH 27/37] docs(datadog): cleanup default tags Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 528b668e081..2f10c967031 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -102,12 +102,12 @@ You can add any number of tags to your metrics via keyword arguments (`key=value ### Adding default tags -If you want to set the same tags for all metrics, you can use the `set_default_tags` method or the `default_tags` parameter in the `log_metrics` decorator and then persist tags across the Lambda invocations. +You can persist tags across Lambda invocations and `DatadogMetrics` instances via `set_default_tags` method, or `default_tags` parameter in the `log_metrics` decorator. -If you'd like to remove them at some point, you can use `clear_default_tags` method. +If you'd like to remove them at some point, you can use the `clear_default_tags` method. -???+ note - When default tags are configured and an additional specific tag is assigned to a metric, the metric will exclusively contain that specific tag. +???+ note "Metric tag takes precedence over default tags of the same name" + When adding tags with the same name via `add_metric` and `set_default_tags`, `add_metric` takes precedence. === "set_default_tags.py" From 340b4466b44881e15c3bf6b92c30f60a6678d428 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 17:51:17 +0200 Subject: [PATCH 28/37] docs(datadog): simplify code snippet Signed-off-by: heitorlessa --- .../metrics_datadog/src/flush_metrics_to_standard_output.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/metrics_datadog/src/flush_metrics_to_standard_output.py b/examples/metrics_datadog/src/flush_metrics_to_standard_output.py index 1dce54c300e..a58fe877925 100644 --- a/examples/metrics_datadog/src/flush_metrics_to_standard_output.py +++ b/examples/metrics_datadog/src/flush_metrics_to_standard_output.py @@ -1,8 +1,7 @@ -from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics from aws_lambda_powertools.utilities.typing import LambdaContext -provider = DatadogProvider(flush_to_log=True) -metrics = DatadogMetrics(provider=provider) +metrics = DatadogMetrics(flush_to_log=True) @metrics.log_metrics # ensures metrics are flushed upon request completion/failure From 9085e9ec84253b0b36ffbb5f605506f941ad71ba Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 17:51:30 +0200 Subject: [PATCH 29/37] docs(datadog): move forwarder to advanced; cleanup Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 2f10c967031..22322f73a76 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -121,22 +121,6 @@ If you'd like to remove them at some point, you can use the `clear_default_tags` --8<-- "examples/metrics_datadog/src/set_default_tags_log_metrics.py" ``` -### Exporting to Datadog Log Forwarder - -You have the option to flush metrics to the standard output for exporting, which can then be seamlessly processed through the [Datadog 
Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. - -=== "flush_metrics_to_standard_output.py" - - ```python hl_lines="4" - --8<-- "examples/metrics_datadog/src/flush_metrics_to_standard_output.py" - ``` - -=== "log_metrics_standard_output.json" - - ```json hl_lines="2 6 7" - --8<-- "examples/metrics_datadog/src/log_metrics_standard_output.json" - ``` - ### Flushing metrics As you finish adding all your metrics, you need to serialize and flush them to standard output. You can do that automatically with the `log_metrics` decorator. @@ -217,6 +201,24 @@ Contrary to the `log_metrics` decorator, you are now also responsible to flush m --8<-- "examples/metrics_datadog/src/flush_datadog_metrics.py" ``` +### Integrating with Datadog Forwarder + +Use `flush_to_log=True` in `DatadogMetrics` to integrate with the legacy [Datadog Forwarder](https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation){target="_blank" rel="nofollow"}. + +This will serialize and flush metrics to standard output. + +=== "flush_metrics_to_standard_output.py" + + ```python hl_lines="4" + --8<-- "examples/metrics_datadog/src/flush_metrics_to_standard_output.py" + ``` + +=== "log_metrics_standard_output.json" + + ```json + --8<-- "examples/metrics_datadog/src/log_metrics_standard_output.json" + ``` + ## Testing your code ### Setting environment variables From f6f20e8fc6a6590e75b965765391d34640c3c9c9 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 18:10:45 +0200 Subject: [PATCH 30/37] docs(datadog): cleanup flush Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 22322f73a76..0546e6055c7 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -123,9 +123,9 @@ If you'd like to remove them at some point, you can use the `clear_default_tags` ### Flushing metrics -As you finish adding all your metrics, you need to serialize and flush them to standard output. You can do that automatically with the `log_metrics` decorator. +Use `log_metrics` decorator to automatically serialize and flush your metrics (SDK or Forwarder) at the end of your invocation. -This decorator also **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised. +This decorator also ensures metrics are flushed in the event of an exception, including warning you in case you forgot to add metrics. === "add_metrics.py" @@ -141,9 +141,9 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr #### Raising SchemaValidationError on empty metrics -If you want to ensure at least one metric is always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator: +Use `raise_on_empty_metrics=True` if you want to ensure at least one metric is always emitted. 
-```python hl_lines="7" title="Raising SchemaValidationError exception if no metrics are added" +```python hl_lines="7" title="Failing fast if no metrics are added" --8<-- "examples/metrics_datadog/src/raise_on_empty_datadog_metrics.py" ``` From 3ed85ee63248fcca9611b3c372f5ccfb66a5fca3 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 18:11:44 +0200 Subject: [PATCH 31/37] docs(datadog): correct typo in cold start Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 0546e6055c7..fe7c9ba91ae 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -169,9 +169,9 @@ You can optionally capture cold start metrics with `log_metrics` decorator via ` If it's a cold start invocation, this feature will: * Create a separate Datadog metric solely containing a metric named `ColdStart` -* Add `function_name` as a tag +* Add `function_name` metric tag -This has the advantage of keeping cold start metric separate from your application metrics, where you might have unrelated dimensions. +This has the advantage of keeping cold start metric separate from your application metrics, where you might have unrelated tags. ???+ info We do not emit 0 as a value for ColdStart metric for cost reasons. [Let us know](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2C+triage&template=feature_request.md&title=){target="_blank"} if you'd prefer a flag to override it. From 3b0812f39dce056d4fbb2ae0f8c2a66874ab16e1 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 18:18:34 +0200 Subject: [PATCH 32/37] docs: code annotation, move env vars Signed-off-by: heitorlessa --- docs/core/metrics/datadog.md | 22 +++++++------------ .../metrics_datadog/src/run_tests_env_var.sh | 2 +- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index fe7c9ba91ae..198a88ec80b 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -48,13 +48,6 @@ You can read more details in the [Datadog official documentation](https://docs.d ???+ tip All examples shared in this documentation are available within the [project repository](https://github.com/aws-powertools/powertools-lambda-python/tree/develop/examples){target="_blank" }. -Datadog provider has two global settings that will be used across all metrics emitted: - -| Setting | Description | Environment variable | Constructor parameter | -| -------------------- | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- | -| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` | -| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` | - ### Install > **Using Datadog Forwarder?** You can skip this step. 
@@ -178,13 +171,12 @@ This has the advantage of keeping cold start metric separate from your applicati ### Environment variables -The following environment variable is available to configure Metrics at a global scope: +You can use any of the following environment variables to configure `DatadogMetrics`: -| Setting | Description | Environment variable | Default | -| ------------------ | -------------------------------- | ------------------------------ | ------- | -| **Namespace Name** | Sets namespace used for metrics. | `POWERTOOLS_METRICS_NAMESPACE` | `None` | - -`POWERTOOLS_METRICS_NAMESPACE` is also available on a per-instance basis with the `namespace` parameter, which will consequently override the environment variable value. +| Setting | Description | Environment variable | Constructor parameter | +| -------------------- | -------------------------------------------------------------------------------- | ------------------------------ | --------------------- | +| **Metric namespace** | Logical container where all metrics will be placed e.g. `ServerlessAirline` | `POWERTOOLS_METRICS_NAMESPACE` | `namespace` | +| **Flush to log** | Use this when you want to flush metrics to be exported through Datadog Forwarder | `DD_FLUSH_TO_LOG` | `flush_to_log` | ## Advanced @@ -233,10 +225,12 @@ This will serialize and flush metrics to standard output. Make sure to set `POWERTOOLS_METRICS_NAMESPACE` before running your tests to prevent failing on `SchemaValidation` exception. You can set it before you run tests or via pytest plugins like [dotenv](https://pypi.org/project/pytest-dotenv/){target="_blank" rel="nofollow"}. -```bash title="Injecting dummy Metric Namespace before running tests" +```bash title="Injecting dummy metric namespace before running tests" --8<-- "examples/metrics_datadog/src/run_tests_env_var.sh" ``` +1. **`DD_FLUSH_TO_LOG=True`** makes it easier to test by flushing final metrics to standard output. + ### Clearing metrics `DatadogMetrics` keep metrics in memory across multiple instances. If you need to test this behavior, you can use the following Pytest fixture to ensure metrics are reset incl. cold start: diff --git a/examples/metrics_datadog/src/run_tests_env_var.sh b/examples/metrics_datadog/src/run_tests_env_var.sh index 21a3a090242..5663afd3ba4 100644 --- a/examples/metrics_datadog/src/run_tests_env_var.sh +++ b/examples/metrics_datadog/src/run_tests_env_var.sh @@ -1 +1 @@ -POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" DD_FLUSH_TO_LOG="True" python -m pytest +POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" DD_FLUSH_TO_LOG="True" python -m pytest # (1)! From f33a719efb1504f42a8402ac6406fdd282bb51ad Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 18:33:36 +0200 Subject: [PATCH 33/37] docs: recommend using Secrets for API Key --- docs/core/metrics/datadog.md | 4 ++-- examples/metrics_datadog/sam/template.yaml | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index 198a88ec80b..ceab2284310 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -52,11 +52,11 @@ You can read more details in the [Datadog official documentation](https://docs.d > **Using Datadog Forwarder?** You can skip this step. -We recommend using Datadog SDK and Datadog Lambda Extension with this feature for optimal results. 
+We recommend using [Datadog SDK](https://docs.datadoghq.com/serverless/installation/python/){target="_blank" rel="nofollow"} and Datadog Lambda Extension with this feature for optimal results. For Datadog SDK, you can add `aws-lambda-powertools[datadog]` as a dependency in your preferred tool, or as a Lambda Layer in the following example: -```yaml hl_lines="16-17 27 31" title="AWS Serverless Application Model (SAM) example" +```yaml hl_lines="15-16 28 32" title="AWS Serverless Application Model (SAM) example" --8<-- "examples/metrics_datadog/sam/template.yaml" ``` diff --git a/examples/metrics_datadog/sam/template.yaml b/examples/metrics_datadog/sam/template.yaml index 37689bb638b..39c8883c150 100644 --- a/examples/metrics_datadog/sam/template.yaml +++ b/examples/metrics_datadog/sam/template.yaml @@ -10,10 +10,11 @@ Globals: Environment: Variables: POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline - # Configuring the API_KEY and DD_SITE can be achieved through various methods. - # An functional approach involves utilizing these two environment variables. - # Further details can be found at: https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli + # [Production setup] + # DATADOG_API_KEY_SECRET_ARN: "" + # [Development only] DD_API_KEY: "" + # Configuration details: https://docs.datadoghq.com/serverless/installation/python/?tab=datadogcli DD_SITE: datadoghq.com Layers: From d4c5f8f800e99742796f2221e3407170684bd397 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 14 Aug 2023 17:47:55 +0100 Subject: [PATCH 34/37] Adding tags validation + tests --- .../metrics/provider/datadog/datadog.py | 35 +++++++++- .../metrics/provider/datadog/metrics.py | 4 +- .../metrics/provider/datadog/warnings.py | 8 +++ .../metrics/test_metrics_datadog.py | 30 ++------ tests/unit/metrics/conftest.py | 6 ++ tests/unit/metrics/test_unit_datadog.py | 69 +++++++++++++++++++ 6 files changed, 124 insertions(+), 28 deletions(-) create mode 100644 aws_lambda_powertools/metrics/provider/datadog/warnings.py create mode 100644 tests/unit/metrics/conftest.py create mode 100644 tests/unit/metrics/test_unit_datadog.py diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index e06c92abea3..230e5cdae52 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -11,6 +11,7 @@ from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError from aws_lambda_powertools.metrics.provider import BaseProvider +from aws_lambda_powertools.metrics.provider.datadog.warnings import DatadogDataValidationWarning from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice from aws_lambda_powertools.utilities.typing import LambdaContext @@ -108,6 +109,9 @@ def add_metric( f"See Datadog documentation here: \n {docs}", ) + # validating metric tag + self._validate_datadog_tags_name(**tags) + if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") @@ -235,7 +239,8 @@ def log_metrics( ------- **Lambda function using tracer and metrics decorators** - from aws_lambda_powertools import DatadogMetrics, Tracer + from aws_lambda_powertools import Tracer + from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics metrics = DatadogMetrics(namespace="powertools") tracer = Tracer(service="payment") @@ -294,6 +299,7 @@ def 
set_default_tags(self, **tags) -> None: def lambda_handler(): return True """ + self._validate_datadog_tags_name(**tags) self.default_tags.update(**tags) @staticmethod @@ -325,6 +331,33 @@ def _serialize_datadog_tags(metric_tags: Dict[str, Any], default_tags: Dict[str, return [f"{tag_key}:{tag_value}" for tag_key, tag_value in tags.items()] + @staticmethod + def _validate_datadog_tags_name(**tags): + """ + Validate a metric tag according to specific requirements. + + Metric tags must start with a letter. + Metric tags must not exceed 200 characters. Fewer than 100 is preferred from a UI perspective. + + More information here: https://docs.datadoghq.com/getting_started/tagging/#define-tags + + Parameters + ---------- + tags: Dict + The metric tags to be validated. + """ + for tag_key, tag_value in tags.items(): + tag = f"{tag_key}:{tag_value}" + if not tag[0].isalpha() or len(tag) > 200: + docs = "https://docs.datadoghq.com/getting_started/tagging/#define-tags" + warnings.warn( + f"Invalid tag value. Please ensure the specific tag {tag} follows the requirements. \n" + f"May incur data loss for metrics. \n" + f"See Datadog documentation here: \n {docs}", + DatadogDataValidationWarning, + stacklevel=2, + ) + @staticmethod def _validate_datadog_metric_name(metric_name: str) -> bool: """ diff --git a/aws_lambda_powertools/metrics/provider/datadog/metrics.py b/aws_lambda_powertools/metrics/provider/datadog/metrics.py index e82ab5df3fa..3ee4dc2f835 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/metrics.py +++ b/aws_lambda_powertools/metrics/provider/datadog/metrics.py @@ -16,7 +16,7 @@ class DatadogMetrics: ------- **Creates a few metrics and publishes them at the end of a function execution** - from aws_lambda_powertools import DatadogMetrics + from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics metrics = DatadogMetrics(namespace="ServerlessAirline") @@ -33,7 +33,7 @@ def lambda_handler(): Parameters ---------- flush_to_log : bool, optional - Used when using export instead of extension + Used when using export instead of Lambda Extension namespace : str, optional Namespace for metrics provider: DatadogProvider, optional diff --git a/aws_lambda_powertools/metrics/provider/datadog/warnings.py b/aws_lambda_powertools/metrics/provider/datadog/warnings.py new file mode 100644 index 00000000000..accf19526e7 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/datadog/warnings.py @@ -0,0 +1,8 @@ +class DatadogDataValidationWarning(Warning): + message: str + + def __init__(self, message: str): + self.message = message + + def __str__(self) -> str: + return self.message diff --git a/tests/functional/metrics/test_metrics_datadog.py b/tests/functional/metrics/test_metrics_datadog.py index 63f5d648091..c81c825f656 100644 --- a/tests/functional/metrics/test_metrics_datadog.py +++ b/tests/functional/metrics/test_metrics_datadog.py @@ -22,7 +22,7 @@ def test_datadog_coldstart(capsys): # WHEN log_metrics is used with capture_cold_start_metric @metrics.log_metrics(capture_cold_start_metric=True) def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.add_metric(name="item_sold", value=1, product="latte", order="online") lambda_handler({}, LambdaContext("example_fn2")) logs = capsys.readouterr().out.strip() @@ -55,7 +55,7 @@ def test_datadog_with_invalid_metric_value(): # WHEN we attempt to serialize an invalid Datadog metric # THEN it should fail validation and raise MetricValueError with
pytest.raises(MetricValueError, match=".*is not a valid number"): - metrics.add_metric(name="item_sold", value="a", tags=["product:latte", "order:online"]) + metrics.add_metric(name="item_sold", value="a", product="latte", order="online") def test_datadog_with_invalid_metric_name(): @@ -66,7 +66,7 @@ def test_datadog_with_invalid_metric_name(): # WHEN we attempt to serialize an invalid Datadog metric # THEN it should fail validation and raise SchemaValidationError with pytest.raises(SchemaValidationError, match="Invalid metric name.*"): - metrics.add_metric(name="1_item_sold", value="a", tags=["product:latte", "order:online"]) + metrics.add_metric(name="1_item_sold", value="a", product="latte", order="online") def test_datadog_raise_on_empty(): @@ -145,7 +145,7 @@ def test_metrics_with_default_namespace(capsys, namespace): # WHEN we add metrics @metrics.log_metrics def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.add_metric(name="item_sold", value=1, product="latte", order="online") lambda_handler({}, LambdaContext("example_fn2")) logs = capsys.readouterr().out.strip() @@ -163,7 +163,7 @@ def test_datadog_with_non_default_namespace(capsys, namespace): # WHEN log_metrics is used @metrics.log_metrics def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.add_metric(name="item_sold", value=1, product="latte", order="online") lambda_handler({}, LambdaContext("example_fn")) logs = capsys.readouterr().out.strip() @@ -194,26 +194,6 @@ def test_clear_metrics(metric): assert my_metrics.metric_set == [] -def test_get_namespace_property(namespace): - # GIVEN DatadogMetrics is initialized - my_metrics = DatadogMetrics(namespace=namespace) - - # WHEN we try to access the namespace property - # THEN namespace property must be present - assert my_metrics.namespace == namespace - - -def test_set_namespace_property(namespace): - # GIVEN DatadogMetrics is initialized - my_metrics = DatadogMetrics() - - # WHEN we set the namespace property after ther initialization - my_metrics.namespace = namespace - - # THEN namespace property must be present - assert my_metrics.namespace == namespace - - def test_persist_default_tags(capsys): # GIVEN DatadogMetrics is initialized and we persist a set of default tags my_metrics = DatadogMetrics(flush_to_log=True) diff --git a/tests/unit/metrics/conftest.py b/tests/unit/metrics/conftest.py new file mode 100644 index 00000000000..8d601e4d13b --- /dev/null +++ b/tests/unit/metrics/conftest.py @@ -0,0 +1,6 @@ +import pytest + + +@pytest.fixture +def namespace() -> str: + return "test_namespace" diff --git a/tests/unit/metrics/test_unit_datadog.py b/tests/unit/metrics/test_unit_datadog.py new file mode 100644 index 00000000000..ab54e9730fe --- /dev/null +++ b/tests/unit/metrics/test_unit_datadog.py @@ -0,0 +1,69 @@ +import pytest + +from aws_lambda_powertools.metrics.exceptions import SchemaValidationError +from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics +from aws_lambda_powertools.metrics.provider.datadog.warnings import DatadogDataValidationWarning + + +def test_get_namespace_property(namespace): + # GIVEN DatadogMetrics is initialized + my_metrics = DatadogMetrics(namespace=namespace) + + # WHEN we try to access the namespace property + # THEN namespace property must be present + assert my_metrics.namespace == namespace + + +def test_set_namespace_property(namespace): + # GIVEN DatadogMetrics is
initialized + my_metrics = DatadogMetrics() + + # WHEN we set the namespace property after the initialization + my_metrics.namespace = namespace + + # THEN namespace property must be present + assert my_metrics.namespace == namespace + + +def test_default_tags_across_instances(): + # GIVEN DatadogMetrics is initialized and we persist a set of default tags + my_metrics = DatadogMetrics() + my_metrics.set_default_tags(environment="test", log_group="/lambda/test") + + # WHEN a new DatadogMetrics instance is created + same_metrics = DatadogMetrics() + + # THEN default tags should also be present in the new instance + assert "environment" in same_metrics.default_tags + + +def test_invalid_datadog_metric_name(): + metrics = DatadogMetrics() + + # GIVEN three metric names, each invalid in a different way + metric_name_1 = "1_metric" # Metric name must not start with a number + metric_name_2 = "metric_ç" # Metric name must not contain unicode characters + metric_name_3 = "".join(["x" for _ in range(201)]) # Metric name must not exceed 200 characters + + # WHEN we try to add metrics with those names + # THEN a SchemaValidationError must be raised + with pytest.raises(SchemaValidationError, match="Invalid metric name.*"): + metrics.add_metric(name=metric_name_1, value=1) + + with pytest.raises(SchemaValidationError, match="Invalid metric name.*"): + metrics.add_metric(name=metric_name_2, value=1) + + with pytest.raises(SchemaValidationError, match="Invalid metric name.*"): + metrics.add_metric(name=metric_name_3, value=1) + + +def test_invalid_datadog_metric_tag(): + metrics = DatadogMetrics() + + # GIVEN a metric tag that exceeds the 200-character limit + metric_tag_1 = "".join(["x" for _ in range(201)]) # Metric tags must not exceed 200 characters + + # WHEN we try to add a metric with that tag + # THEN a DatadogDataValidationWarning must be emitted + with pytest.warns(DatadogDataValidationWarning): + metrics.add_metric(name="metric_2", value=1, tag1=metric_tag_1) From 6fd79451d029927f9d61c63a713aa66b7e10fe89 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 14 Aug 2023 17:51:38 +0100 Subject: [PATCH 35/37] Reverting changes --- docs/core/metrics/datadog.md | 8 ++++---- .../src/{add_metrics.py => add_datadog_metrics.py} | 0 2 files changed, 4 insertions(+), 4 deletions(-) rename examples/metrics_datadog/src/{add_metrics.py => add_datadog_metrics.py} (100%) diff --git a/docs/core/metrics/datadog.md b/docs/core/metrics/datadog.md index ceab2284310..fb5927b3a63 100644 --- a/docs/core/metrics/datadog.md +++ b/docs/core/metrics/datadog.md @@ -66,10 +66,10 @@ You can create metrics using `add_metric`. By default, we will generate the current timestamp for you. Alternatively, you can use the `timestamp` parameter to set a custom one in epoch time. -=== "add_metrics.py" +=== "add_datadog_metrics.py" ```python hl_lines="4 7 9" - --8<-- "examples/metrics_datadog/src/add_metrics.py" + --8<-- "examples/metrics_datadog/src/add_datadog_metrics.py" ``` === "add_metrics_with_timestamp.py" @@ -249,10 +249,10 @@ You can read standard output and assert whether metrics have been flushed.
Here' --8<-- "examples/metrics_datadog/src/assert_single_datadog_metric.py" ``` -=== "add_metrics.py" +=== "add_datadog_metrics.py" ```python - --8<-- "examples/metrics_datadog/src/add_metrics.py" + --8<-- "examples/metrics_datadog/src/add_datadog_metrics.py" ``` ???+ tip diff --git a/examples/metrics_datadog/src/add_metrics.py b/examples/metrics_datadog/src/add_datadog_metrics.py similarity index 100% rename from examples/metrics_datadog/src/add_metrics.py rename to examples/metrics_datadog/src/add_datadog_metrics.py From a78b4fc2dd017c2cc9391ca11c6386eed458bafd Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 14 Aug 2023 18:54:50 +0200 Subject: [PATCH 36/37] docs(metrics): add observability providers section --- docs/core/metrics.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index cf41871ec66..31b4ea99ce7 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -327,6 +327,20 @@ These issues are exacerbated when you create **(A)** metric dimensions condition That is why `Metrics` shares data across instances by default, as that covers 80% of use cases and different personas using Powertools. This allows them to instantiate `Metrics` in multiple places throughout their code - be a separate file, a middleware, or an abstraction that sets default dimensions. +### Observability providers + +> An observability provider is an [AWS Lambda Partner](https://docs.aws.amazon.com/lambda/latest/dg/extensions-api-partners.html){target="_blank" rel="nofollow"} offering a platform for logging, metrics, traces, etc. + +We provide a thin wrapper on top of the most requested observability providers. We strive to keep the user experience as close to our native features as possible while retaining our value-add features. + +!!! tip "Missing your preferred provider? Please create a [feature request](https://github.com/aws-powertools/powertools-lambda-python/issues/new?assignees=&labels=feature-request%2Ctriage&projects=&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"}."
+ +Current providers: + +| Provider | Notes | +| ------------------------------------- | -------------------------------------------------------- | +| [Datadog](./datadog){target="_blank"} | Uses Datadog SDK and Datadog Lambda Extension by default | + ## Testing your code ### Setting environment variables From 400486c4f71a17c48d08226182138c4785e0cddc Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Mon, 14 Aug 2023 18:02:39 +0100 Subject: [PATCH 37/37] Addressing Heitor's feedback --- aws_lambda_powertools/metrics/provider/datadog/datadog.py | 6 +++--- .../metrics_datadog/src/assert_single_datadog_metric.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog/datadog.py b/aws_lambda_powertools/metrics/provider/datadog/datadog.py index 230e5cdae52..6195589cd1b 100644 --- a/aws_lambda_powertools/metrics/provider/datadog/datadog.py +++ b/aws_lambda_powertools/metrics/provider/datadog/datadog.py @@ -110,7 +110,7 @@ def add_metric( ) # validating metric tag - self._validate_datadog_tags_name(**tags) + self._validate_datadog_tags_name(tags) if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") @@ -299,7 +299,7 @@ def set_default_tags(self, **tags) -> None: def lambda_handler(): return True """ - self._validate_datadog_tags_name(**tags) + self._validate_datadog_tags_name(tags) self.default_tags.update(**tags) @staticmethod @@ -332,7 +332,7 @@ def _serialize_datadog_tags(metric_tags: Dict[str, Any], default_tags: Dict[str, return [f"{tag_key}:{tag_value}" for tag_key, tag_value in tags.items()] @staticmethod - def _validate_datadog_tags_name(**tags): + def _validate_datadog_tags_name(tags: Dict): """ Validate a metric tag according to specific requirements. diff --git a/examples/metrics_datadog/src/assert_single_datadog_metric.py b/examples/metrics_datadog/src/assert_single_datadog_metric.py index 6fc757d4a4f..7b6ebf0909b 100644 --- a/examples/metrics_datadog/src/assert_single_datadog_metric.py +++ b/examples/metrics_datadog/src/assert_single_datadog_metric.py @@ -1,8 +1,8 @@ -import add_metrics_without_provider +import add_datadog_metrics def test_log_metrics(capsys): - add_metrics_without_provider.lambda_handler({}, {}) + add_datadog_metrics.lambda_handler({}, {}) log = capsys.readouterr().out.strip() # remove any extra line
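Closing the series, here's a minimal sketch that exercises the new tag validation end to end, mirroring `test_invalid_datadog_metric_tag` above; the test name is illustrative, and the final assertion assumes (per the `warnings.warn` call in `_validate_datadog_tags_name`) that an oversized tag only emits a warning while the metric is still recorded:

```python
import pytest

from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.metrics.provider.datadog.warnings import DatadogDataValidationWarning


def test_oversized_tag_warns_but_still_records():
    metrics = DatadogMetrics(namespace="ServerlessAirline", flush_to_log=True)

    # "tag1:" plus 201 characters exceeds Datadog's 200-character tag limit
    with pytest.warns(DatadogDataValidationWarning):
        metrics.add_metric(name="item_sold", value=1, tag1="x" * 201)

    # validation only warns: the metric is kept in memory for the next flush
    assert len(metrics.metric_set) == 1
```

Warning instead of raising is a deliberate trade-off here: an oversized tag may incur data loss on the Datadog side, but it should not fail the whole invocation.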