
chore: remove deprecated code before GA #78


Merged · 4 commits · Jun 15, 2020
2 changes: 0 additions & 2 deletions aws_lambda_powertools/helper/__init__.py

This file was deleted.

132 changes: 0 additions & 132 deletions aws_lambda_powertools/helper/models.py

This file was deleted.

5 changes: 2 additions & 3 deletions aws_lambda_powertools/logging/__init__.py
@@ -1,6 +1,5 @@
"""Logging utility
"""
from ..helper.models import MetricUnit
from .logger import Logger, log_metric, logger_inject_lambda_context, logger_setup
from .logger import Logger

__all__ = ["logger_setup", "logger_inject_lambda_context", "log_metric", "MetricUnit", "Logger"]
__all__ = ["Logger"]
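
With this change the logging package exports only Logger. A minimal migration sketch, based on the deprecation docstrings shown later in this diff (service name and handler body are illustrative):

    from aws_lambda_powertools import Logger

    logger = Logger(service="payment")  # POWERTOOLS_SERVICE_NAME env var still applies

    @logger.inject_lambda_context
    def handler(event, context):
        logger.info("Collecting payment")
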
99 changes: 99 additions & 0 deletions aws_lambda_powertools/logging/formatter.py
@@ -0,0 +1,99 @@
import json
import logging
from typing import Any


def json_formatter(unserialized_value: Any):
"""JSON custom serializer to cast unserialisable values to strings.
Example
-------
**Serialize unserialisable value to string**
class X: pass
value = {"x": X()}
json.dumps(value, default=json_formatter)
Parameters
----------
unserialized_value: Any
Python object unserializable by JSON
"""
return str(unserialized_value)


class JsonFormatter(logging.Formatter):
"""AWS Lambda Logging formatter.
Formats the log message as a JSON encoded string. If the message is a
dict it will be used directly. If the message can be parsed as JSON, then
    the parsed value is used in the output record.
Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/
"""

def __init__(self, **kwargs):
"""Return a JsonFormatter instance.
The `json_default` kwarg is used to specify a formatter for otherwise
unserialisable values. It must not throw. Defaults to a function that
coerces the value to a string.
Other kwargs are used to specify log field format strings.
"""
datefmt = kwargs.pop("datefmt", None)

super(JsonFormatter, self).__init__(datefmt=datefmt)
self.reserved_keys = ["timestamp", "level", "location"]
self.format_dict = {
"timestamp": "%(asctime)s",
"level": "%(levelname)s",
"location": "%(funcName)s:%(lineno)d",
}
        # Pop json_default first so it is stored as the serializer, not emitted as a log field
        self.default_json_formatter = kwargs.pop("json_default", json_formatter)
        self.format_dict.update(kwargs)

def format(self, record): # noqa: A003
record_dict = record.__dict__.copy()
record_dict["asctime"] = self.formatTime(record, self.datefmt)

log_dict = {}
for key, value in self.format_dict.items():
if value and key in self.reserved_keys:
# converts default logging expr to its record value
# e.g. '%(asctime)s' to '2020-04-24 09:35:40,698'
log_dict[key] = value % record_dict
else:
log_dict[key] = value

if isinstance(record_dict["msg"], dict):
log_dict["message"] = record_dict["msg"]
else:
log_dict["message"] = record.getMessage()

# Attempt to decode the message as JSON, if so, merge it with the
# overall message for clarity.
try:
log_dict["message"] = json.loads(log_dict["message"])
except (json.decoder.JSONDecodeError, TypeError, ValueError):
pass

if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
# from logging.Formatter:format
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)

if record.exc_text:
log_dict["exception"] = record.exc_text

json_record = json.dumps(log_dict, default=self.default_json_formatter)

if hasattr(json_record, "decode"): # pragma: no cover
json_record = json_record.decode("utf-8")

return json_record
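
The new formatter is a drop-in logging.Formatter. A usage sketch assuming only what the code above shows: extra keyword arguments become additional log fields, reserved keys expand %-style logging directives, and dict messages are embedded as-is (field values are illustrative):

    import logging
    import sys

    from aws_lambda_powertools.logging.formatter import JsonFormatter

    handler = logging.StreamHandler(sys.stdout)
    # Non-reserved kwargs such as "service" become extra fields in every record
    handler.setFormatter(JsonFormatter(service="payment"))

    logger = logging.getLogger("app")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    # dict messages are used directly; strings that parse as JSON get merged
    logger.info({"operation": "collect_payment", "charge_id": "ch_123"})
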
55 changes: 55 additions & 0 deletions aws_lambda_powertools/logging/lambda_context.py
@@ -0,0 +1,55 @@
class LambdaContextModel:
"""A handful of Lambda Runtime Context fields
Full Lambda Context object: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Parameters
----------
function_name: str
Lambda function name, by default "UNDEFINED"
e.g. "test"
function_memory_size: int
Lambda function memory in MB, by default 128
function_arn: str
Lambda function ARN, by default "UNDEFINED"
e.g. "arn:aws:lambda:eu-west-1:809313241:function:test"
function_request_id: str
Lambda function unique request id, by default "UNDEFINED"
e.g. "52fdfc07-2182-154f-163f-5f0f9a621d72"
"""

def __init__(
self,
function_name: str = "UNDEFINED",
function_memory_size: int = 128,
function_arn: str = "UNDEFINED",
function_request_id: str = "UNDEFINED",
):
self.function_name = function_name
self.function_memory_size = function_memory_size
self.function_arn = function_arn
self.function_request_id = function_request_id


def build_lambda_context_model(context: object) -> LambdaContextModel:
"""Captures Lambda function runtime info to be used across all log statements
Parameters
----------
context : object
Lambda context object
Returns
-------
LambdaContextModel
Lambda context only with select fields
"""

context = {
"function_name": context.function_name,
"function_memory_size": context.memory_limit_in_mb,
"function_arn": context.invoked_function_arn,
"function_request_id": context.aws_request_id,
}

return LambdaContextModel(**context)
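
build_lambda_context_model simply remaps four attributes of the runtime context object. A quick sketch with a stand-in context (SimpleNamespace mimics the real Lambda context here; values are the docstring examples above):

    from types import SimpleNamespace

    from aws_lambda_powertools.logging.lambda_context import build_lambda_context_model

    fake_context = SimpleNamespace(
        function_name="test",
        memory_limit_in_mb=128,
        invoked_function_arn="arn:aws:lambda:eu-west-1:809313241:function:test",
        aws_request_id="52fdfc07-2182-154f-163f-5f0f9a621d72",
    )

    ctx = build_lambda_context_model(fake_context)
    assert ctx.function_memory_size == 128
    assert ctx.function_request_id == "52fdfc07-2182-154f-163f-5f0f9a621d72"
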
247 changes: 2 additions & 245 deletions aws_lambda_powertools/logging/logger.py
@@ -1,157 +1,21 @@
import copy
import functools
import itertools
import json
import logging
import os
import random
import sys
import warnings
from distutils.util import strtobool
from typing import Any, Callable, Dict, Union

from ..helper.models import MetricUnit, build_lambda_context_model, build_metric_unit_from_str
from .exceptions import InvalidLoggerSamplingRateError
from .formatter import JsonFormatter
from .lambda_context import build_lambda_context_model

logger = logging.getLogger(__name__)

is_cold_start = True


def json_formatter(unserialized_value: Any):
"""JSON custom serializer to cast unserialisable values to strings.
Example
-------
**Serialize unserialisable value to string**
class X: pass
value = {"x": X()}
json.dumps(value, default=json_formatter)
Parameters
----------
unserialized_value: Any
Python object unserializable by JSON
"""
return str(unserialized_value)


class JsonFormatter(logging.Formatter):
"""AWS Lambda Logging formatter.
Formats the log message as a JSON encoded string. If the message is a
dict it will be used directly. If the message can be parsed as JSON, then
    the parsed value is used in the output record.
Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/
"""

def __init__(self, **kwargs):
"""Return a JsonFormatter instance.
The `json_default` kwarg is used to specify a formatter for otherwise
unserialisable values. It must not throw. Defaults to a function that
coerces the value to a string.
Other kwargs are used to specify log field format strings.
"""
datefmt = kwargs.pop("datefmt", None)

super(JsonFormatter, self).__init__(datefmt=datefmt)
self.reserved_keys = ["timestamp", "level", "location"]
self.format_dict = {
"timestamp": "%(asctime)s",
"level": "%(levelname)s",
"location": "%(funcName)s:%(lineno)d",
}
self.format_dict.update(kwargs)
self.default_json_formatter = kwargs.pop("json_default", json_formatter)

def format(self, record): # noqa: A003
record_dict = record.__dict__.copy()
record_dict["asctime"] = self.formatTime(record, self.datefmt)

log_dict = {}
for key, value in self.format_dict.items():
if value and key in self.reserved_keys:
# converts default logging expr to its record value
# e.g. '%(asctime)s' to '2020-04-24 09:35:40,698'
log_dict[key] = value % record_dict
else:
log_dict[key] = value

if isinstance(record_dict["msg"], dict):
log_dict["message"] = record_dict["msg"]
else:
log_dict["message"] = record.getMessage()

# Attempt to decode the message as JSON, if so, merge it with the
# overall message for clarity.
try:
log_dict["message"] = json.loads(log_dict["message"])
except (json.decoder.JSONDecodeError, TypeError, ValueError):
pass

if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
# from logging.Formatter:format
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)

if record.exc_text:
log_dict["exception"] = record.exc_text

json_record = json.dumps(log_dict, default=self.default_json_formatter)

if hasattr(json_record, "decode"): # pragma: no cover
json_record = json_record.decode("utf-8")

return json_record


def logger_setup(
service: str = None, level: str = None, sampling_rate: float = 0.0, legacy: bool = False, **kwargs
) -> DeprecationWarning:
"""DEPRECATED
This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead
Example
-------
**Logger class - Same UX**
from aws_lambda_powertools import Logger
logger = Logger(service="payment") # same env var still applies
"""
raise DeprecationWarning("Use Logger instead - This method will be removed when GA")


def logger_inject_lambda_context(
lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False
) -> DeprecationWarning:
"""DEPRECATED
This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead
Example
-------
**Logger class - Same UX**
from aws_lambda_powertools import Logger
logger = Logger(service="payment") # same env var still applies
@logger.inject_lambda_context
def handler(evt, ctx):
pass
"""
raise DeprecationWarning("Use Logger instead - This method will be removed when GA")


def _is_cold_start() -> bool:
"""Verifies whether is cold start
@@ -170,113 +34,6 @@ def _is_cold_start() -> bool:
return cold_start


def log_metric(
name: str, namespace: str, unit: MetricUnit, value: float = 0, service: str = "service_undefined", **dimensions,
):
"""Logs a custom metric in a statsD-esque format to stdout.
**This will be removed when GA - Use `aws_lambda_powertools.metrics.metrics.Metrics` instead**
    Creating custom metrics synchronously impacts performance/execution time.
Instead, log_metric prints a metric to CloudWatch Logs.
That allows us to pick them up asynchronously via another Lambda function and create them as a metric.
NOTE: It takes up to 9 dimensions by default, and Metric units are conveniently available via MetricUnit Enum.
If service is not passed as arg or via env var, "service_undefined" will be used as dimension instead.
**Output in CloudWatch Logs**: `MONITORING|<metric_value>|<metric_unit>|<metric_name>|<namespace>|<dimensions>`
Serverless Application Repository App that creates custom metric from this log output:
https://serverlessrepo.aws.amazon.com/applications/arn:aws:serverlessrepo:us-east-1:374852340823:applications~async-custom-metrics
Environment variables
---------------------
POWERTOOLS_SERVICE_NAME: str
service name
Parameters
----------
name : str
metric name, by default None
namespace : str
metric namespace (e.g. application name), by default None
unit : MetricUnit, by default MetricUnit.Count
metric unit enum value (e.g. MetricUnit.Seconds), by default None\n
API Info: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html
value : float, optional
metric value, by default 0
service : str, optional
service name used as dimension, by default "service_undefined"
dimensions: dict, optional
keyword arguments as additional dimensions (e.g. `customer=customerId`)
Example
-------
**Log metric to count number of successful payments; define service via env var**
$ export POWERTOOLS_SERVICE_NAME="payment"
from aws_lambda_powertools.logging import MetricUnit, log_metric
log_metric(
name="SuccessfulPayments",
unit=MetricUnit.Count,
value=1,
namespace="DemoApp"
)
**Log metric to count number of successful payments per campaign & customer**
from aws_lambda_powertools.logging import MetricUnit, log_metric
log_metric(
name="SuccessfulPayments",
service="payment",
unit=MetricUnit.Count,
value=1,
namespace="DemoApp",
campaign=campaign_id,
customer=customer_id
)
"""

warnings.warn(message="This method will be removed in GA; use Metrics instead", category=DeprecationWarning)
logger.debug(f"Building new custom metric. Name: {name}, Unit: {unit}, Value: {value}, Dimensions: {dimensions}")
service = os.getenv("POWERTOOLS_SERVICE_NAME") or service
dimensions = __build_dimensions(**dimensions)
unit = build_metric_unit_from_str(unit)

metric = f"MONITORING|{value}|{unit.name}|{name}|{namespace}|service={service}"
if dimensions:
metric = f"MONITORING|{value}|{unit.name}|{name}|{namespace}|service={service},{dimensions}"

print(metric)


def __build_dimensions(**dimensions) -> str:
"""Builds correct format for custom metric dimensions from kwargs
Parameters
----------
dimensions: dict, optional
additional dimensions
Returns
-------
str
Dimensions in the form of "key=value,key2=value2"
"""
MAX_DIMENSIONS = 10
dimension = ""

# CloudWatch accepts a max of 10 dimensions per metric
# We include service name as a dimension
# so we take up to 9 values as additional dimensions
# before we convert everything to a string of key=value
dimensions_partition = dict(itertools.islice(dimensions.items(), MAX_DIMENSIONS))
dimensions_list = [dimension + "=" + value for dimension, value in dimensions_partition.items() if value]
dimension = ",".join(dimensions_list)

return dimension


class Logger(logging.Logger):
"""Creates and setups a logger to format statements in JSON.
5 changes: 2 additions & 3 deletions aws_lambda_powertools/metrics/__init__.py
@@ -1,7 +1,7 @@
"""CloudWatch Embedded Metric Format utility
"""
from ..helper.models import MetricUnit
from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError, UniqueNamespaceError
from .base import MetricUnit
from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError
from .metric import single_metric
from .metrics import Metrics

@@ -12,5 +12,4 @@
"MetricUnitError",
"SchemaValidationError",
"MetricValueError",
"UniqueNamespaceError",
]
60 changes: 31 additions & 29 deletions aws_lambda_powertools/metrics/base.py
@@ -4,13 +4,12 @@
import numbers
import os
import pathlib
import warnings
from enum import Enum
from typing import Dict, List, Union

import fastjsonschema

from ..helper.models import MetricUnit
from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError, UniqueNamespaceError
from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError

logger = logging.getLogger(__name__)

@@ -21,6 +20,35 @@
MAX_METRICS = 100


class MetricUnit(Enum):
Seconds = "Seconds"
Microseconds = "Microseconds"
Milliseconds = "Milliseconds"
Bytes = "Bytes"
Kilobytes = "Kilobytes"
Megabytes = "Megabytes"
Gigabytes = "Gigabytes"
Terabytes = "Terabytes"
Bits = "Bits"
Kilobits = "Kilobits"
Megabits = "Megabits"
Gigabits = "Gigabits"
Terabits = "Terabits"
Percent = "Percent"
Count = "Count"
BytesPerSecond = "Bytes/Second"
KilobytesPerSecond = "Kilobytes/Second"
MegabytesPerSecond = "Megabytes/Second"
GigabytesPerSecond = "Gigabytes/Second"
TerabytesPerSecond = "Terabytes/Second"
BitsPerSecond = "Bits/Second"
KilobitsPerSecond = "Kilobits/Second"
MegabitsPerSecond = "Megabits/Second"
GigabitsPerSecond = "Gigabits/Second"
TerabitsPerSecond = "Terabits/Second"
CountPerSecond = "Count/Second"


class MetricManager:
"""Base class for metric functionality (namespace, metric, dimension, serialization)
@@ -45,8 +73,6 @@ class MetricManager:
        When metric unit isn't supported by CloudWatch
MetricValueError
When metric value isn't a number
UniqueNamespaceError
When an additional namespace is set
SchemaValidationError
When metric object fails EMF schema validation
"""
@@ -61,30 +87,6 @@ def __init__(
self._metric_units = [unit.value for unit in MetricUnit]
self._metric_unit_options = list(MetricUnit.__members__)

def add_namespace(self, name: str):
"""Adds given metric namespace
Example
-------
**Add metric namespace**
metric.add_namespace(name="ServerlessAirline")
Parameters
----------
name : str
Metric namespace
"""
warnings.warn(
"add_namespace method is deprecated. Pass namespace to Metrics constructor instead", DeprecationWarning
)
if self.namespace is not None:
raise UniqueNamespaceError(
f"Namespace '{self.namespace}' already set - Only one namespace is allowed across metrics"
)
logger.debug(f"Adding metrics namespace: {name}")
self.namespace = name

def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]):
"""Adds given metric
6 changes: 0 additions & 6 deletions aws_lambda_powertools/metrics/exceptions.py
@@ -14,9 +14,3 @@ class MetricValueError(Exception):
"""When metric value isn't a valid number"""

pass


class UniqueNamespaceError(Exception):
"""When an additional namespace is set"""

pass
3 changes: 1 addition & 2 deletions aws_lambda_powertools/metrics/metric.py
@@ -3,8 +3,7 @@
from contextlib import contextmanager
from typing import Dict

from ..helper.models import MetricUnit
from .base import MetricManager
from .base import MetricManager, MetricUnit

logger = logging.getLogger(__name__)
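
metric.py now pulls MetricUnit from .base instead of the deleted helper module. For reference, a sketch of single_metric usage; the context-manager form, the namespace keyword, and add_dimension are assumptions based on the utility's documented usage rather than this diff:

    from aws_lambda_powertools.metrics import MetricUnit, single_metric

    with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="DemoApp") as metric:
        metric.add_dimension(name="function_name", value="example")
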

89 changes: 0 additions & 89 deletions tests/functional/test_logger.py
@@ -6,7 +6,6 @@
import pytest

from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.logging import MetricUnit, log_metric, logger_inject_lambda_context, logger_setup
from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError
from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger

@@ -236,68 +235,6 @@ def handler(event, context):
assert fourth_log["cold_start"] is False


def test_log_metric(capsys):
# GIVEN a service, unit and value have been provided
# WHEN log_metric is called
    # THEN custom metric line should match given values
log_metric(
service="payment", name="test_metric", unit=MetricUnit.Seconds, value=60, namespace="DemoApp",
)
expected = "MONITORING|60|Seconds|test_metric|DemoApp|service=payment\n"
captured = capsys.readouterr()

assert captured.out == expected


def test_log_metric_env_var(monkeypatch, capsys):
# GIVEN a service, unit and value have been provided
# WHEN log_metric is called
    # THEN custom metric line should match given values
service_name = "payment"
monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)

log_metric(name="test_metric", unit=MetricUnit.Seconds, value=60, namespace="DemoApp")
expected = "MONITORING|60|Seconds|test_metric|DemoApp|service=payment\n"
captured = capsys.readouterr()

assert captured.out == expected


def test_log_metric_multiple_dimensions(capsys):
# GIVEN multiple optional dimensions are provided
# WHEN log_metric is called
    # THEN dimensions should appear as dimension=value
log_metric(
name="test_metric", unit=MetricUnit.Seconds, value=60, customer="abc", charge_id="123", namespace="DemoApp",
)
expected = "MONITORING|60|Seconds|test_metric|DemoApp|service=service_undefined,customer=abc,charge_id=123\n"
captured = capsys.readouterr()

assert captured.out == expected


@pytest.mark.parametrize(
"invalid_input,expected",
[
({"unit": "seconds"}, "MONITORING|0|Seconds|test_metric|DemoApp|service=service_undefined\n",),
(
{"unit": "Seconds", "customer": None, "charge_id": "123", "payment_status": ""},
"MONITORING|0|Seconds|test_metric|DemoApp|service=service_undefined,charge_id=123\n",
),
],
ids=["metric unit as string lower case", "empty dimension value"],
)
def test_log_metric_partially_correct_args(capsys, invalid_input, expected):
# GIVEN invalid arguments are provided such as empty dimension values and metric units in strings
# WHEN log_metric is called
# THEN default values should be used such as "Count" as a unit, invalid dimensions not included
# and no exception raised
log_metric(name="test_metric", namespace="DemoApp", **invalid_input)
captured = capsys.readouterr()

assert captured.out == expected


def test_package_logger(capsys):

set_package_logger()
@@ -315,32 +252,6 @@ def test_package_logger_format(stdout, capsys):
assert "test" in output["formatter"]


@pytest.mark.parametrize(
"invalid_input,expected",
[({"unit": "Blah"}, ValueError), ({"unit": None}, ValueError), ({}, TypeError)],
ids=["invalid metric unit as str", "unit as None", "missing required unit"],
)
def test_log_metric_invalid_unit(capsys, invalid_input, expected):
# GIVEN invalid units are provided
# WHEN log_metric is called
    # THEN the expected exception should be raised

with pytest.raises(expected):
log_metric(name="test_metric", namespace="DemoApp", **invalid_input)


def test_logger_setup_deprecated():
# Should be removed when GA
with pytest.raises(DeprecationWarning):
logger_setup()


def test_logger_inject_lambda_context_deprecated():
# Should be removed when GA
with pytest.raises(DeprecationWarning):
logger_inject_lambda_context()


def test_logger_append_duplicated(stdout):
logger = Logger(stream=stdout, request_id="value")
logger.structure_logs(append=True, request_id="new_value")
155 changes: 42 additions & 113 deletions tests/functional/test_metrics.py

Large diffs are not rendered by default.