From c4ea3cb16e7cf58752ae35b26287ef6288b81aa3 Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Tue, 24 Oct 2023 14:25:33 -0700 Subject: [PATCH 01/16] Bedrock Testing Infrastructure (#937) * Add AWS Bedrock testing infrastructure * Cache Package Version Lookups (#946) * Cache _get_package_version * Add Python 2.7 support to get_package_version caching * [Mega-Linter] Apply linters fixes * Bump tests --------- Co-authored-by: SlavaSkvortsov <29122694+SlavaSkvortsov@users.noreply.github.com> Co-authored-by: TimPansino * Fix Redis Generator Methods (#947) * Fix scan_iter for redis * Replace generator methods * Update instance info instrumentation * Remove mistake from uninstrumented methods * Add skip condition to asyncio generator tests * Add skip condition to asyncio generator tests --------- Co-authored-by: Lalleh Rafeei Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Automatic RPM System Updates (#948) * Checkout old action * Adding RPM action * Add dry run * Incorporating action into workflow * Wire secret into custom action * Enable action * Correct action name * Fix syntax * Fix quoting issues * Drop pre-verification. 
Does not work on python * Fix merge artifact * Remove OpenAI references --------- Co-authored-by: Uma Annamalai Co-authored-by: SlavaSkvortsov <29122694+SlavaSkvortsov@users.noreply.github.com> Co-authored-by: TimPansino Co-authored-by: Lalleh Rafeei Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .github/actions/update-rpm-config/action.yml | 109 + .github/workflows/deploy-python.yml | 10 + .github/workflows/tests.yml | 1 - newrelic/common/package_version_utils.py | 41 +- newrelic/hooks/datastore_redis.py | 74 +- .../test_package_version_utils.py | 24 +- tests/datastore_redis/conftest.py | 1 + tests/datastore_redis/test_generators.py | 258 +++ .../test_uninstrumented_methods.py | 1 - .../_mock_external_bedrock_server.py | 2018 +++++++++++++++++ tests/mlmodel_bedrock/conftest.py | 136 ++ tests/mlmodel_bedrock/test_chat_completion.py | 40 + tox.ini | 3 + 13 files changed, 2672 insertions(+), 44 deletions(-) create mode 100644 .github/actions/update-rpm-config/action.yml create mode 100644 tests/datastore_redis/test_generators.py create mode 100644 tests/mlmodel_bedrock/_mock_external_bedrock_server.py create mode 100644 tests/mlmodel_bedrock/conftest.py create mode 100644 tests/mlmodel_bedrock/test_chat_completion.py diff --git a/.github/actions/update-rpm-config/action.yml b/.github/actions/update-rpm-config/action.yml new file mode 100644 index 0000000000..9d19ebba0b --- /dev/null +++ b/.github/actions/update-rpm-config/action.yml @@ -0,0 +1,109 @@ +name: "update-rpm-config" +description: "Set current version of agent in rpm config using API." +inputs: + agent-language: + description: "Language agent to configure (eg. python)" + required: true + default: "python" + target-system: + description: "Target System: prod|staging|all" + required: true + default: "all" + agent-version: + description: "3-4 digit agent version number (eg. 
1.2.3) with optional leading v (ignored)" + required: true + dry-run: + description: "Dry Run" + required: true + default: "false" + production-api-key: + description: "API key for New Relic Production" + required: false + staging-api-key: + description: "API key for New Relic Staging" + required: false + +runs: + using: "composite" + steps: + - name: Trim potential leading v from agent version + shell: bash + run: | + AGENT_VERSION=${{ inputs.agent-version }} + echo "AGENT_VERSION=${AGENT_VERSION#"v"}" >> $GITHUB_ENV + + - name: Generate Payload + shell: bash + run: | + echo "PAYLOAD='{ \"system_configuration\": { \"key\": \"${{ inputs.agent-language }}_agent_version\", \"value\": \"${{ env.AGENT_VERSION }}\" } }'" >> $GITHUB_ENV + + - name: Generate Content-Type + shell: bash + run: | + echo "CONTENT_TYPE='Content-Type: application/json'" >> $GITHUB_ENV + + - name: Update Staging system configuration page + shell: bash + if: ${{ inputs.dry-run == 'false' && (inputs.target-system == 'staging' || inputs.target-system == 'all') }} + run: | + curl -X POST 'https://staging-api.newrelic.com/v2/system_configuration.json' \ + -H "X-Api-Key:${{ inputs.staging-api-key }}" -i \ + -H ${{ env.CONTENT_TYPE }} \ + -d ${{ env.PAYLOAD }} + + - name: Update Production system configuration page + shell: bash + if: ${{ inputs.dry-run == 'false' && (inputs.target-system == 'prod' || inputs.target-system == 'all') }} + run: | + curl -X POST 'https://api.newrelic.com/v2/system_configuration.json' \ + -H "X-Api-Key:${{ inputs.production-api-key }}" -i \ + -H ${{ env.CONTENT_TYPE }} \ + -d ${{ env.PAYLOAD }} + + - name: Verify Staging system configuration update + shell: bash + if: ${{ inputs.dry-run == 'false' && (inputs.target-system == 'staging' || inputs.target-system == 'all') }} + run: | + STAGING_VERSION=$(curl -X GET 'https://staging-api.newrelic.com/v2/system_configuration.json' \ + -H "X-Api-Key:${{ inputs.staging-api-key }}" \ + -H "${{ env.CONTENT_TYPE }}" | jq 
".system_configurations | from_entries | .${{inputs.agent-language}}_agent_version") + + if [ "${{ env.AGENT_VERSION }}" != "$STAGING_VERSION" ]; then + echo "Staging version mismatch: $STAGING_VERSION" + exit 1 + fi + + - name: Verify Production system configuration update + shell: bash + if: ${{ inputs.dry-run == 'false' && (inputs.target-system == 'prod' || inputs.target-system == 'all') }} + run: | + PROD_VERSION=$(curl -X GET 'https://api.newrelic.com/v2/system_configuration.json' \ + -H "X-Api-Key:${{ inputs.production-api-key }}" \ + -H "${{ env.CONTENT_TYPE }}" | jq ".system_configurations | from_entries | .${{inputs.agent-language}}_agent_version") + + if [ "${{ env.AGENT_VERSION }}" != "$PROD_VERSION" ]; then + echo "Production version mismatch: $PROD_VERSION" + exit 1 + fi + + - name: (dry-run) Update Staging system configuration page + shell: bash + if: ${{ inputs.dry-run != 'false' && (inputs.target-system == 'staging' || inputs.target-system == 'all') }} + run: | + cat << EOF + curl -X POST 'https://staging-api.newrelic.com/v2/system_configuration.json' \ + -H "X-Api-Key:**REDACTED**" -i \ + -H ${{ env.CONTENT_TYPE }} \ + -d ${{ env.PAYLOAD }} + EOF + + - name: (dry-run) Update Production system configuration page + shell: bash + if: ${{ inputs.dry-run != 'false' && (inputs.target-system == 'prod' || inputs.target-system == 'all') }} + run: | + cat << EOF + curl -X POST 'https://api.newrelic.com/v2/system_configuration.json' \ + -H "X-Api-Key:**REDACTED**" -i \ + -H ${{ env.CONTENT_TYPE }} \ + -d ${{ env.PAYLOAD }} + EOF diff --git a/.github/workflows/deploy-python.yml b/.github/workflows/deploy-python.yml index fe16ee4854..ca908b8250 100644 --- a/.github/workflows/deploy-python.yml +++ b/.github/workflows/deploy-python.yml @@ -80,3 +80,13 @@ jobs: env: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} + + - name: Update RPM Config + uses: ./.github/actions/update-rpm-config + with: + agent-language: "python" + target-system: "all" + 
agent-version: "${{ github.ref_name }}" + dry-run: "false" + production-api-key: ${{ secrets.NEW_RELIC_API_KEY_PRODUCTION }} + staging-api-key: ${{ secrets.NEW_RELIC_API_KEY_STAGING }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e3b264a9fc..402d0c629c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -62,7 +62,6 @@ jobs: steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 with: python-version: "3.10" diff --git a/newrelic/common/package_version_utils.py b/newrelic/common/package_version_utils.py index 3152342b4d..68320b897f 100644 --- a/newrelic/common/package_version_utils.py +++ b/newrelic/common/package_version_utils.py @@ -14,6 +14,44 @@ import sys +try: + from functools import cache as _cache_package_versions +except ImportError: + from functools import wraps + from threading import Lock + + _package_version_cache = {} + _package_version_cache_lock = Lock() + + def _cache_package_versions(wrapped): + """ + Threadsafe implementation of caching for _get_package_version. + + Python 2.7 does not have the @functools.cache decorator, and + must be reimplemented with support for clearing the cache. 
+ """ + + @wraps(wrapped) + def _wrapper(name): + if name in _package_version_cache: + return _package_version_cache[name] + + with _package_version_cache_lock: + if name in _package_version_cache: + return _package_version_cache[name] + + version = _package_version_cache[name] = wrapped(name) + return version + + def cache_clear(): + """Cache clear function to mimic @functools.cache""" + with _package_version_cache_lock: + _package_version_cache.clear() + + _wrapper.cache_clear = cache_clear + return _wrapper + + # Need to account for 4 possible variations of version declaration specified in (rejected) PEP 396 VERSION_ATTRS = ("__version__", "version", "__version_tuple__", "version_tuple") # nosec NULL_VERSIONS = frozenset((None, "", "0", "0.0", "0.0.0", "0.0.0.0", (0,), (0, 0), (0, 0, 0), (0, 0, 0, 0))) # nosec @@ -67,6 +105,7 @@ def int_or_str(value): return version +@_cache_package_versions def _get_package_version(name): module = sys.modules.get(name, None) version = None @@ -75,7 +114,7 @@ def _get_package_version(name): if "importlib" in sys.modules and hasattr(sys.modules["importlib"], "metadata"): try: # In Python3.10+ packages_distribution can be checked for as well - if hasattr(sys.modules["importlib"].metadata, "packages_distributions"): # pylint: disable=E1101 + if hasattr(sys.modules["importlib"].metadata, "packages_distributions"): # pylint: disable=E1101 distributions = sys.modules["importlib"].metadata.packages_distributions() # pylint: disable=E1101 distribution_name = distributions.get(name, name) else: diff --git a/newrelic/hooks/datastore_redis.py b/newrelic/hooks/datastore_redis.py index 6ba1920029..bbc517586d 100644 --- a/newrelic/hooks/datastore_redis.py +++ b/newrelic/hooks/datastore_redis.py @@ -14,10 +14,11 @@ import re -from newrelic.api.datastore_trace import DatastoreTrace +from newrelic.api.datastore_trace import DatastoreTrace, DatastoreTraceWrapper, wrap_datastore_trace from newrelic.api.time_trace import current_trace from 
newrelic.api.transaction import current_transaction -from newrelic.common.object_wrapper import function_wrapper, wrap_function_wrapper +from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.common.async_wrapper import coroutine_wrapper, async_generator_wrapper, generator_wrapper _redis_client_sync_methods = { "acl_dryrun", @@ -136,6 +137,7 @@ "client_no_evict", "client_pause", "client_reply", + "client_setinfo", "client_setname", "client_tracking", "client_trackinginfo", @@ -162,7 +164,6 @@ "cluster_reset", "cluster_save_config", "cluster_set_config_epoch", - "client_setinfo", "cluster_setslot", "cluster_slaves", "cluster_slots", @@ -248,7 +249,7 @@ "hmset_dict", "hmset", "hrandfield", - "hscan_inter", + "hscan_iter", "hscan", "hset", "hsetnx", @@ -399,8 +400,8 @@ "syndump", "synupdate", "tagvals", - "tfcall", "tfcall_async", + "tfcall", "tfunction_delete", "tfunction_list", "tfunction_load", @@ -473,6 +474,13 @@ "zunionstore", } +_redis_client_gen_methods = { + "scan_iter", + "hscan_iter", + "sscan_iter", + "zscan_iter", +} + _redis_client_methods = _redis_client_sync_methods.union(_redis_client_async_methods) _redis_multipart_commands = set(["client", "cluster", "command", "config", "debug", "sentinel", "slowlog", "script"]) @@ -498,50 +506,31 @@ def _instance_info(kwargs): def _wrap_Redis_method_wrapper_(module, instance_class_name, operation): - def _nr_wrapper_Redis_method_(wrapped, instance, args, kwargs): - transaction = current_transaction() - - if transaction is None: - return wrapped(*args, **kwargs) - - dt = DatastoreTrace(product="Redis", target=None, operation=operation, source=wrapped) - - transaction._nr_datastore_instance_info = (None, None, None) - - with dt: - result = wrapped(*args, **kwargs) - - host, port_path_or_id, db = transaction._nr_datastore_instance_info - dt.host = host - dt.port_path_or_id = port_path_or_id - dt.database_name = db - - return result - name = "%s.%s" % (instance_class_name, operation) - 
wrap_function_wrapper(module, name, _nr_wrapper_Redis_method_) + if operation in _redis_client_gen_methods: + async_wrapper = generator_wrapper + else: + async_wrapper = None + wrap_datastore_trace(module, name, product="Redis", target=None, operation=operation, async_wrapper=async_wrapper) -def _wrap_asyncio_Redis_method_wrapper(module, instance_class_name, operation): - @function_wrapper - async def _nr_wrapper_asyncio_Redis_async_method_(wrapped, instance, args, kwargs): - transaction = current_transaction() - if transaction is None: - return await wrapped(*args, **kwargs) - - with DatastoreTrace(product="Redis", target=None, operation=operation): - return await wrapped(*args, **kwargs) +def _wrap_asyncio_Redis_method_wrapper(module, instance_class_name, operation): def _nr_wrapper_asyncio_Redis_method_(wrapped, instance, args, kwargs): from redis.asyncio.client import Pipeline if isinstance(instance, Pipeline): return wrapped(*args, **kwargs) - # Method should be run when awaited, therefore we wrap in an async wrapper. - return _nr_wrapper_asyncio_Redis_async_method_(wrapped)(*args, **kwargs) + # Method should be run when awaited or iterated, therefore we wrap in an async wrapper. 
+ return DatastoreTraceWrapper(wrapped, product="Redis", target=None, operation=operation, async_wrapper=async_wrapper)(*args, **kwargs) name = "%s.%s" % (instance_class_name, operation) + if operation in _redis_client_gen_methods: + async_wrapper = async_generator_wrapper + else: + async_wrapper = coroutine_wrapper + wrap_function_wrapper(module, name, _nr_wrapper_asyncio_Redis_method_) @@ -614,7 +603,15 @@ def _nr_Connection_send_command_wrapper_(wrapped, instance, args, kwargs): except: pass - transaction._nr_datastore_instance_info = (host, port_path_or_id, db) + # Find DatastoreTrace no matter how many other traces are inbetween + trace = current_trace() + while trace is not None and not isinstance(trace, DatastoreTrace): + trace = getattr(trace, "parent", None) + + if trace is not None: + trace.host = host + trace.port_path_or_id = port_path_or_id + trace.database_name = db # Older Redis clients would when sending multi part commands pass # them in as separate arguments to send_command(). Need to therefore @@ -666,7 +663,6 @@ def instrument_asyncio_redis_client(module): if hasattr(class_, operation): _wrap_asyncio_Redis_method_wrapper(module, "Redis", operation) - def instrument_redis_commands_core(module): _instrument_redis_commands_module(module, "CoreCommands") diff --git a/tests/agent_unittests/test_package_version_utils.py b/tests/agent_unittests/test_package_version_utils.py index 30c22cff18..5ed689ea2a 100644 --- a/tests/agent_unittests/test_package_version_utils.py +++ b/tests/agent_unittests/test_package_version_utils.py @@ -20,6 +20,7 @@ from newrelic.common.package_version_utils import ( NULL_VERSIONS, VERSION_ATTRS, + _get_package_version, get_package_version, get_package_version_tuple, ) @@ -31,7 +32,7 @@ # such as distribution_packages and removed pkg_resources. 
IS_PY38_PLUS = sys.version_info[:2] >= (3, 8) -IS_PY310_PLUS = sys.version_info[:2] >= (3,10) +IS_PY310_PLUS = sys.version_info[:2] >= (3, 10) SKIP_IF_NOT_IMPORTLIB_METADATA = pytest.mark.skipif(not IS_PY38_PLUS, reason="importlib.metadata is not supported.") SKIP_IF_IMPORTLIB_METADATA = pytest.mark.skipif( IS_PY38_PLUS, reason="importlib.metadata is preferred over pkg_resources." @@ -46,7 +47,13 @@ def patched_pytest_module(monkeypatch): monkeypatch.delattr(pytest, attr) yield pytest - + + +@pytest.fixture(scope="function", autouse=True) +def cleared_package_version_cache(): + """Ensure cache is empty before every test to exercise code paths.""" + _get_package_version.cache_clear() + # This test only works on Python 3.7 @SKIP_IF_IMPORTLIB_METADATA @@ -123,3 +130,16 @@ def test_mapping_import_to_distribution_packages(): def test_pkg_resources_metadata(): version = get_package_version("pytest") assert version not in NULL_VERSIONS, version + + +def test_version_caching(monkeypatch): + # Add fake module to be deleted later + sys.modules["mymodule"] = sys.modules["pytest"] + setattr(pytest, "__version__", "1.0.0") + version = get_package_version("mymodule") + assert version not in NULL_VERSIONS, version + + # Ensure after deleting that the call to _get_package_version still completes because of caching + del sys.modules["mymodule"] + version = get_package_version("mymodule") + assert version not in NULL_VERSIONS, version diff --git a/tests/datastore_redis/conftest.py b/tests/datastore_redis/conftest.py index 53ff2658de..6747039b47 100644 --- a/tests/datastore_redis/conftest.py +++ b/tests/datastore_redis/conftest.py @@ -15,6 +15,7 @@ import pytest from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture # noqa: F401; pylint: disable=W0611 +from testing_support.fixture.event_loop import event_loop as loop # noqa: F401; pylint: disable=W0611 _default_settings = { diff --git a/tests/datastore_redis/test_generators.py 
b/tests/datastore_redis/test_generators.py new file mode 100644 index 0000000000..f747838e19 --- /dev/null +++ b/tests/datastore_redis/test_generators.py @@ -0,0 +1,258 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import redis +from testing_support.db_settings import redis_settings +from testing_support.fixtures import override_application_settings +from testing_support.util import instance_hostname +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.api.datastore_trace import DatastoreTrace +from newrelic.api.time_trace import current_trace +from newrelic.common.package_version_utils import get_package_version_tuple + +DB_SETTINGS = redis_settings()[0] +REDIS_PY_VERSION = get_package_version_tuple("redis") + +# Settings + +_enable_instance_settings = { + "datastore_tracer.instance_reporting.enabled": True, +} +_disable_instance_settings = { + "datastore_tracer.instance_reporting.enabled": False, +} + +# Metrics + +_base_scoped_metrics = ( + ("Datastore/operation/Redis/scan_iter", 1), + ("Datastore/operation/Redis/sscan_iter", 1), + ("Datastore/operation/Redis/zscan_iter", 1), + ("Datastore/operation/Redis/hscan_iter", 1), + ("Datastore/operation/Redis/set", 1), + ("Datastore/operation/Redis/sadd", 1), + ("Datastore/operation/Redis/zadd", 1), + ("Datastore/operation/Redis/hset", 1), +) 
+ +_base_rollup_metrics = ( + ("Datastore/all", 8), + ("Datastore/allOther", 8), + ("Datastore/Redis/all", 8), + ("Datastore/Redis/allOther", 8), + ("Datastore/operation/Redis/scan_iter", 1), + ("Datastore/operation/Redis/sscan_iter", 1), + ("Datastore/operation/Redis/zscan_iter", 1), + ("Datastore/operation/Redis/hscan_iter", 1), + ("Datastore/operation/Redis/set", 1), + ("Datastore/operation/Redis/sadd", 1), + ("Datastore/operation/Redis/zadd", 1), + ("Datastore/operation/Redis/hset", 1), +) + +_disable_rollup_metrics = list(_base_rollup_metrics) +_enable_rollup_metrics = list(_base_rollup_metrics) + +_host = instance_hostname(DB_SETTINGS["host"]) +_port = DB_SETTINGS["port"] + +_instance_metric_name = "Datastore/instance/Redis/%s/%s" % (_host, _port) + +_enable_rollup_metrics.append((_instance_metric_name, 8)) + +_disable_rollup_metrics.append((_instance_metric_name, None)) + +# Operations + + +def exercise_redis(client): + """ + Exercise client generators by iterating on various methods and ensuring they are + non-empty, and that traces are started and stopped with the generator. 
+ """ + + # Set existing values + client.set("scan-key", "value") + client.sadd("sscan-key", "value") + client.zadd("zscan-key", {"value": 1}) + client.hset("hscan-key", "field", "value") + + # Check generators + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + for k in client.scan_iter("scan-*"): + assert k == b"scan-key" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + for k in client.sscan_iter("sscan-key"): + assert k == b"value" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + for k, _ in client.zscan_iter("zscan-key"): + assert k == b"value" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + for f, v in client.hscan_iter("hscan-key"): + assert f == b"field" + assert v == b"value" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + +async def exercise_redis_async(client): + """ + Exercise client generators by iterating on various methods and ensuring they are + non-empty, and that traces are started and stopped with the generator. 
+ """ + + # Set existing values + await client.set("scan-key", "value") + await client.sadd("sscan-key", "value") + await client.zadd("zscan-key", {"value": 1}) + await client.hset("hscan-key", "field", "value") + + # Check generators + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + async for k in client.scan_iter("scan-*"): + assert k == b"scan-key" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + async for k in client.sscan_iter("sscan-key"): + assert k == b"value" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + async for k, _ in client.zscan_iter("zscan-key"): + assert k == b"value" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + flag = False + assert not isinstance(current_trace(), DatastoreTrace) # Assert no active DatastoreTrace + async for f, v in client.hscan_iter("hscan-key"): + assert f == b"field" + assert v == b"value" + assert isinstance(current_trace(), DatastoreTrace) # Assert DatastoreTrace now active + flag = True + assert flag + + +# Tests + + +@override_application_settings(_enable_instance_settings) +@validate_transaction_metrics( + "test_generators:test_strict_redis_generator_enable_instance", + scoped_metrics=_base_scoped_metrics, + rollup_metrics=_enable_rollup_metrics, + background_task=True, +) +@background_task() +def test_strict_redis_generator_enable_instance(): + client = redis.StrictRedis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0) + exercise_redis(client) + + +@override_application_settings(_disable_instance_settings) 
+@validate_transaction_metrics( + "test_generators:test_strict_redis_generator_disable_instance", + scoped_metrics=_base_scoped_metrics, + rollup_metrics=_disable_rollup_metrics, + background_task=True, +) +@background_task() +def test_strict_redis_generator_disable_instance(): + client = redis.StrictRedis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0) + exercise_redis(client) + + +@override_application_settings(_enable_instance_settings) +@validate_transaction_metrics( + "test_generators:test_redis_generator_enable_instance", + scoped_metrics=_base_scoped_metrics, + rollup_metrics=_enable_rollup_metrics, + background_task=True, +) +@background_task() +def test_redis_generator_enable_instance(): + client = redis.Redis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0) + exercise_redis(client) + + +@override_application_settings(_disable_instance_settings) +@validate_transaction_metrics( + "test_generators:test_redis_generator_disable_instance", + scoped_metrics=_base_scoped_metrics, + rollup_metrics=_disable_rollup_metrics, + background_task=True, +) +@background_task() +def test_redis_generator_disable_instance(): + client = redis.Redis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0) + exercise_redis(client) + + +@pytest.mark.skipif(REDIS_PY_VERSION < (4, 2), reason="Redis.asyncio was not added until v4.2") +@override_application_settings(_enable_instance_settings) +@validate_transaction_metrics( + "test_generators:test_redis_async_generator_enable_instance", + scoped_metrics=_base_scoped_metrics, + rollup_metrics=_enable_rollup_metrics, + background_task=True, +) +@background_task() +def test_redis_async_generator_enable_instance(loop): + client = redis.asyncio.Redis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0) + loop.run_until_complete(exercise_redis_async(client)) + + +@pytest.mark.skipif(REDIS_PY_VERSION < (4, 2), reason="Redis.asyncio was not added until v4.2") 
+@override_application_settings(_disable_instance_settings) +@validate_transaction_metrics( + "test_generators:test_redis_async_generator_disable_instance", + scoped_metrics=_base_scoped_metrics, + rollup_metrics=_disable_rollup_metrics, + background_task=True, +) +@background_task() +def test_redis_async_generator_disable_instance(loop): + client = redis.asyncio.Redis(host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], db=0) + loop.run_until_complete(exercise_redis_async(client)) diff --git a/tests/datastore_redis/test_uninstrumented_methods.py b/tests/datastore_redis/test_uninstrumented_methods.py index d86f4de955..c0be684b2f 100644 --- a/tests/datastore_redis/test_uninstrumented_methods.py +++ b/tests/datastore_redis/test_uninstrumented_methods.py @@ -65,7 +65,6 @@ "get_property", "get_relation", "get_retry", - "hscan_iter", "index_name", "labels", "list_keys", diff --git a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py new file mode 100644 index 0000000000..3d200449bf --- /dev/null +++ b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py @@ -0,0 +1,2018 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import re + +from testing_support.mock_external_http_server import MockExternalHTTPServer + +# This defines an external server test apps can make requests to instead of +# the real Bedrock backend. 
This provides 3 features: +# +# 1) This removes dependencies on external websites. +# 2) Provides a better mechanism for making an external call in a test app than +# simple calling another endpoint the test app makes available because this +# server will not be instrumented meaning we don't have to sort through +# transactions to separate the ones created in the test app and the ones +# created by an external call. +# 3) This app runs on a separate thread meaning it won't block the test app. + +RESPONSES = { + "amazon.titan-text-express-v1::Command: Write me a blog about making strong business decisions as a leader.": [ + {}, + { + "inputTextTokenCount": 19, + "results": [ + { + "tokenCount": 128, + "outputText": " Making strong business decisions as a leader requires a combination of strategic thinking, data analysis, and intuition. Here are some tips to help you make informed and effective decisions:\nDefine your goals and vision: Clearly understand your organization's goals and vision, and ensure that all decision-making aligns with these objectives. This will provide a roadmap for your decisions and help you stay focused on the bigger picture.\nGather relevant data and information: Collect and analyze data related to the decision you need to make. Consider multiple sources of information, such as market trends, financial reports, and stakeholder feedback. Use data to inform your decision-making process", + "completionReason": "LENGTH", + } + ], + }, + ], + "anthropic.claude-instant-v1::Human: Write me a blog about making strong business decisions as a leader.": [ + {}, + { + "completion": " Here is a draft blog post on making strong business decisions as a leader:\n\nTitle: 5 Tips for Making Strong Business Decisions as a Leader\n\nBeing a leader means that tough business decisions will inevitably land on your desk. How you handle those decisions can have a huge impact on your company's success. 
Here are some tips to help you make strong, informed choices that move your business in the right direction.\n\n1. Gather all relevant data. Don't make a call until you've examined the issue from every angle. Seek out useful metrics, get feedback from various stakeholders, and look at historical trends and precedents. The more data you have, the clearer the right path will become. \n\n2. Consider both short and long-term implications. While it's important to address immediate needs, don't lose sight of how a decision may impact the future. Will your choice simply solve today's problem or help build sustainable growth? Carefully weigh short-term gains against potential long-term consequences.\n\n3. Trust your instincts but don't decide alone. Your gut feelings are valuable, but they shouldn't be the sole basis for a leadership decision. Consult with your management team and get differing perspectives. Encourage respectful debate to surface any risks or uncertainties that need discussion. \n\n4. Be willing todelaya decisionif youneed moretime.There'snobenefittomakingarushjudgement beforeallfactorshavebeenweighed.It'sbettertoletyourdecision\"bake\"athirdopinionormoredataratherthanpotentiallyregrettingahastycalllater.\n\n5. Follow through on the outcome. A good decision means little without effective implementation. Clearly communicate the rationale for your choice and gain organizational buy-in. Then follow up to ensure your solution is executed properly and intended goals are achieved. Are any adjustments needed along the way? \n\nLeaders are entrusted to make the calls that steer a business. 
With care, research and an open yet discerning approach, you can make decisions that propel your company confidently into the future.", + "stop_reason": "stop_sequence", + }, + ], + "ai21.j2-mid-v1::Write me a blog about making strong business decisions as a leader.": [ + {}, + { + "id": 1234, + "prompt": { + "text": "Write me a blog about making strong business decisions as a leader.", + "tokens": [ + { + "generatedToken": { + "token": "\u2581Write", + "logprob": -10.650314331054688, + "raw_logprob": -10.650314331054688, + }, + "topTokens": None, + "textRange": {"start": 0, "end": 5}, + }, + { + "generatedToken": { + "token": "\u2581me", + "logprob": -5.457987308502197, + "raw_logprob": -5.457987308502197, + }, + "topTokens": None, + "textRange": {"start": 5, "end": 8}, + }, + { + "generatedToken": { + "token": "\u2581a\u2581blog", + "logprob": -8.36896800994873, + "raw_logprob": -8.36896800994873, + }, + "topTokens": None, + "textRange": {"start": 8, "end": 15}, + }, + { + "generatedToken": { + "token": "\u2581about\u2581making", + "logprob": -14.223419189453125, + "raw_logprob": -14.223419189453125, + }, + "topTokens": None, + "textRange": {"start": 15, "end": 28}, + }, + { + "generatedToken": { + "token": "\u2581strong", + "logprob": -9.367725372314453, + "raw_logprob": -9.367725372314453, + }, + "topTokens": None, + "textRange": {"start": 28, "end": 35}, + }, + { + "generatedToken": { + "token": "\u2581business\u2581decisions", + "logprob": -7.66295862197876, + "raw_logprob": -7.66295862197876, + }, + "topTokens": None, + "textRange": {"start": 35, "end": 54}, + }, + { + "generatedToken": { + "token": "\u2581as\u2581a\u2581leader", + "logprob": -13.765915870666504, + "raw_logprob": -13.765915870666504, + }, + "topTokens": None, + "textRange": {"start": 54, "end": 66}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -10.953210830688477, + "raw_logprob": -10.953210830688477, + }, + "topTokens": None, + "textRange": {"start": 66, "end": 67}, + }, + ], 
+ }, + "completions": [ + { + "data": { + "text": "\nWhen you are a leader at work, you need to make timely and informed decisions on behalf of your team or company. You have to consider multiple factors and variables, and analyze data in a way to make the best possible choice.\n\nHowever, sometimes things don't turn out the way you intended. Your decision might not work as intended, or act in unforeseen ways. Or, you might find new information or context that causes you to question your decision. That's okay.\n\nIt's important to have courage when you're a leader. This means being willing to think critically, reflect, learn from mistakes, and take action steps moving forward.\n\nThere are three steps that can help you grow as a leader and make better business decisions:\n\nStep 1: Gather information\n\nThe first step to making a good decision is to make sure that you have all of the facts. It's important to know what information you need, and from where to get it.\n\nYou can gather information by doing things like reading reports, talking to stakeholders, and conducting research.\n\nStep 2: Analyze information\n\nOnce you've gathered all of your information, you need to take some time to think about it. You need to analyze the data and identify patterns, trends, and trends that might not be immediately obvious.\n\nThere are a few things you should keep in mind when you're analyzing information:\n\n* Identify the key points: What are the key takeaways from this information? 
What", + "tokens": [ + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.00011955977242905647, + "raw_logprob": -0.00011955977242905647, + }, + "topTokens": None, + "textRange": {"start": 0, "end": 1}, + }, + { + "generatedToken": { + "token": "\u2581When\u2581you\u2581are", + "logprob": -6.066172122955322, + "raw_logprob": -6.066172122955322, + }, + "topTokens": None, + "textRange": {"start": 1, "end": 13}, + }, + { + "generatedToken": { + "token": "\u2581a\u2581leader", + "logprob": -0.8404027223587036, + "raw_logprob": -0.8404027223587036, + }, + "topTokens": None, + "textRange": {"start": 13, "end": 22}, + }, + { + "generatedToken": { + "token": "\u2581at\u2581work", + "logprob": -8.004234313964844, + "raw_logprob": -8.004234313964844, + }, + "topTokens": None, + "textRange": {"start": 22, "end": 30}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.07083408534526825, + "raw_logprob": -0.07083408534526825, + }, + "topTokens": None, + "textRange": {"start": 30, "end": 31}, + }, + { + "generatedToken": { + "token": "\u2581you\u2581need\u2581to\u2581make", + "logprob": -2.5708985328674316, + "raw_logprob": -2.5708985328674316, + }, + "topTokens": None, + "textRange": {"start": 31, "end": 48}, + }, + { + "generatedToken": { + "token": "\u2581timely", + "logprob": -9.624330520629883, + "raw_logprob": -9.624330520629883, + }, + "topTokens": None, + "textRange": {"start": 48, "end": 55}, + }, + { + "generatedToken": { + "token": "\u2581and", + "logprob": -1.5508010387420654, + "raw_logprob": -1.5508010387420654, + }, + "topTokens": None, + "textRange": {"start": 55, "end": 59}, + }, + { + "generatedToken": { + "token": "\u2581informed\u2581decisions", + "logprob": -0.5989360809326172, + "raw_logprob": -0.5989360809326172, + }, + "topTokens": None, + "textRange": {"start": 59, "end": 78}, + }, + { + "generatedToken": { + "token": "\u2581on\u2581behalf\u2581of", + "logprob": -5.749756336212158, + "raw_logprob": -5.749756336212158, + }, + 
"topTokens": None, + "textRange": {"start": 78, "end": 91}, + }, + { + "generatedToken": { + "token": "\u2581your\u2581team", + "logprob": -0.29448866844177246, + "raw_logprob": -0.29448866844177246, + }, + "topTokens": None, + "textRange": {"start": 91, "end": 101}, + }, + { + "generatedToken": { + "token": "\u2581or", + "logprob": -2.9078853130340576, + "raw_logprob": -2.9078853130340576, + }, + "topTokens": None, + "textRange": {"start": 101, "end": 104}, + }, + { + "generatedToken": { + "token": "\u2581company", + "logprob": -0.4439607262611389, + "raw_logprob": -0.4439607262611389, + }, + "topTokens": None, + "textRange": {"start": 104, "end": 112}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.004392143338918686, + "raw_logprob": -0.004392143338918686, + }, + "topTokens": None, + "textRange": {"start": 112, "end": 113}, + }, + { + "generatedToken": { + "token": "\u2581You\u2581have", + "logprob": -6.982149600982666, + "raw_logprob": -6.982149600982666, + }, + "topTokens": None, + "textRange": {"start": 113, "end": 122}, + }, + { + "generatedToken": { + "token": "\u2581to\u2581consider", + "logprob": -2.413727283477783, + "raw_logprob": -2.413727283477783, + }, + "topTokens": None, + "textRange": {"start": 122, "end": 134}, + }, + { + "generatedToken": { + "token": "\u2581multiple", + "logprob": -2.61666202545166, + "raw_logprob": -2.61666202545166, + }, + "topTokens": None, + "textRange": {"start": 134, "end": 143}, + }, + { + "generatedToken": { + "token": "\u2581factors", + "logprob": -0.11320021003484726, + "raw_logprob": -0.11320021003484726, + }, + "topTokens": None, + "textRange": {"start": 143, "end": 151}, + }, + { + "generatedToken": { + "token": "\u2581and", + "logprob": -1.4593441486358643, + "raw_logprob": -1.4593441486358643, + }, + "topTokens": None, + "textRange": {"start": 151, "end": 155}, + }, + { + "generatedToken": { + "token": "\u2581variables", + "logprob": -2.3700382709503174, + "raw_logprob": -2.3700382709503174, + }, 
+ "topTokens": None, + "textRange": {"start": 155, "end": 165}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.9362450838088989, + "raw_logprob": -0.9362450838088989, + }, + "topTokens": None, + "textRange": {"start": 165, "end": 166}, + }, + { + "generatedToken": { + "token": "\u2581and\u2581analyze", + "logprob": -7.707818031311035, + "raw_logprob": -7.707818031311035, + }, + "topTokens": None, + "textRange": {"start": 166, "end": 178}, + }, + { + "generatedToken": { + "token": "\u2581data\u2581in", + "logprob": -7.114713668823242, + "raw_logprob": -7.114713668823242, + }, + "topTokens": None, + "textRange": {"start": 178, "end": 186}, + }, + { + "generatedToken": { + "token": "\u2581a\u2581way\u2581to\u2581make", + "logprob": -2.1352782249450684, + "raw_logprob": -2.1352782249450684, + }, + "topTokens": None, + "textRange": {"start": 186, "end": 200}, + }, + { + "generatedToken": { + "token": "\u2581the\u2581best\u2581possible", + "logprob": -1.202060341835022, + "raw_logprob": -1.202060341835022, + }, + "topTokens": None, + "textRange": {"start": 200, "end": 218}, + }, + { + "generatedToken": { + "token": "\u2581choice", + "logprob": -0.49673229455947876, + "raw_logprob": -0.49673229455947876, + }, + "topTokens": None, + "textRange": {"start": 218, "end": 225}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.08440639078617096, + "raw_logprob": -0.08440639078617096, + }, + "topTokens": None, + "textRange": {"start": 225, "end": 226}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -1.4274420738220215, + "raw_logprob": -1.4274420738220215, + }, + "topTokens": None, + "textRange": {"start": 226, "end": 227}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.002755180699750781, + "raw_logprob": -0.002755180699750781, + }, + "topTokens": None, + "textRange": {"start": 227, "end": 228}, + }, + { + "generatedToken": { + "token": "\u2581However", + "logprob": -2.9974615573883057, + 
"raw_logprob": -2.9974615573883057, + }, + "topTokens": None, + "textRange": {"start": 228, "end": 235}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.0017327546374872327, + "raw_logprob": -0.0017327546374872327, + }, + "topTokens": None, + "textRange": {"start": 235, "end": 236}, + }, + { + "generatedToken": { + "token": "\u2581sometimes", + "logprob": -2.893026113510132, + "raw_logprob": -2.893026113510132, + }, + "topTokens": None, + "textRange": {"start": 236, "end": 246}, + }, + { + "generatedToken": { + "token": "\u2581things", + "logprob": -4.238265037536621, + "raw_logprob": -4.238265037536621, + }, + "topTokens": None, + "textRange": {"start": 246, "end": 253}, + }, + { + "generatedToken": { + "token": "\u2581don't", + "logprob": -2.367069721221924, + "raw_logprob": -2.367069721221924, + }, + "topTokens": None, + "textRange": {"start": 253, "end": 259}, + }, + { + "generatedToken": { + "token": "\u2581turn\u2581out", + "logprob": -1.7048457860946655, + "raw_logprob": -1.7048457860946655, + }, + "topTokens": None, + "textRange": {"start": 259, "end": 268}, + }, + { + "generatedToken": { + "token": "\u2581the\u2581way\u2581you", + "logprob": -2.1934995651245117, + "raw_logprob": -2.1934995651245117, + }, + "topTokens": None, + "textRange": {"start": 268, "end": 280}, + }, + { + "generatedToken": { + "token": "\u2581intended", + "logprob": -3.7538819313049316, + "raw_logprob": -3.7538819313049316, + }, + "topTokens": None, + "textRange": {"start": 280, "end": 289}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.41568616032600403, + "raw_logprob": -0.41568616032600403, + }, + "topTokens": None, + "textRange": {"start": 289, "end": 290}, + }, + { + "generatedToken": { + "token": "\u2581Your", + "logprob": -4.143064498901367, + "raw_logprob": -4.143064498901367, + }, + "topTokens": None, + "textRange": {"start": 290, "end": 295}, + }, + { + "generatedToken": { + "token": "\u2581decision", + "logprob": -1.1384129524230957, + 
"raw_logprob": -1.1384129524230957, + }, + "topTokens": None, + "textRange": {"start": 295, "end": 304}, + }, + { + "generatedToken": { + "token": "\u2581might\u2581not\u2581work", + "logprob": -2.4380242824554443, + "raw_logprob": -2.4380242824554443, + }, + "topTokens": None, + "textRange": {"start": 304, "end": 319}, + }, + { + "generatedToken": { + "token": "\u2581as\u2581intended", + "logprob": -2.9615366458892822, + "raw_logprob": -2.9615366458892822, + }, + "topTokens": None, + "textRange": {"start": 319, "end": 331}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.22413745522499084, + "raw_logprob": -0.22413745522499084, + }, + "topTokens": None, + "textRange": {"start": 331, "end": 332}, + }, + { + "generatedToken": { + "token": "\u2581or", + "logprob": -0.4422154128551483, + "raw_logprob": -0.4422154128551483, + }, + "topTokens": None, + "textRange": {"start": 332, "end": 335}, + }, + { + "generatedToken": { + "token": "\u2581act\u2581in", + "logprob": -16.771242141723633, + "raw_logprob": -16.771242141723633, + }, + "topTokens": None, + "textRange": {"start": 335, "end": 342}, + }, + { + "generatedToken": { + "token": "\u2581unforeseen", + "logprob": -2.0343406200408936, + "raw_logprob": -2.0343406200408936, + }, + "topTokens": None, + "textRange": {"start": 342, "end": 353}, + }, + { + "generatedToken": { + "token": "\u2581ways", + "logprob": -0.03732850402593613, + "raw_logprob": -0.03732850402593613, + }, + "topTokens": None, + "textRange": {"start": 353, "end": 358}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.07006527483463287, + "raw_logprob": -0.07006527483463287, + }, + "topTokens": None, + "textRange": {"start": 358, "end": 359}, + }, + { + "generatedToken": { + "token": "\u2581Or", + "logprob": -4.574007511138916, + "raw_logprob": -4.574007511138916, + }, + "topTokens": None, + "textRange": {"start": 359, "end": 362}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.35941576957702637, + 
"raw_logprob": -0.35941576957702637, + }, + "topTokens": None, + "textRange": {"start": 362, "end": 363}, + }, + { + "generatedToken": { + "token": "\u2581you\u2581might\u2581find", + "logprob": -3.0860962867736816, + "raw_logprob": -3.0860962867736816, + }, + "topTokens": None, + "textRange": {"start": 363, "end": 378}, + }, + { + "generatedToken": { + "token": "\u2581new\u2581information", + "logprob": -3.0317506790161133, + "raw_logprob": -3.0317506790161133, + }, + "topTokens": None, + "textRange": {"start": 378, "end": 394}, + }, + { + "generatedToken": { + "token": "\u2581or", + "logprob": -3.251086950302124, + "raw_logprob": -3.251086950302124, + }, + "topTokens": None, + "textRange": {"start": 394, "end": 397}, + }, + { + "generatedToken": { + "token": "\u2581context", + "logprob": -4.189438343048096, + "raw_logprob": -4.189438343048096, + }, + "topTokens": None, + "textRange": {"start": 397, "end": 405}, + }, + { + "generatedToken": { + "token": "\u2581that\u2581causes", + "logprob": -4.464134216308594, + "raw_logprob": -4.464134216308594, + }, + "topTokens": None, + "textRange": {"start": 405, "end": 417}, + }, + { + "generatedToken": { + "token": "\u2581you", + "logprob": -0.2493533492088318, + "raw_logprob": -0.2493533492088318, + }, + "topTokens": None, + "textRange": {"start": 417, "end": 421}, + }, + { + "generatedToken": { + "token": "\u2581to\u2581question", + "logprob": -2.251695156097412, + "raw_logprob": -2.251695156097412, + }, + "topTokens": None, + "textRange": {"start": 421, "end": 433}, + }, + { + "generatedToken": { + "token": "\u2581your\u2581decision", + "logprob": -1.989322543144226, + "raw_logprob": -1.989322543144226, + }, + "topTokens": None, + "textRange": {"start": 433, "end": 447}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.17142613232135773, + "raw_logprob": -0.17142613232135773, + }, + "topTokens": None, + "textRange": {"start": 447, "end": 448}, + }, + { + "generatedToken": { + "token": "\u2581That's", + 
"logprob": -5.326101303100586, + "raw_logprob": -5.326101303100586, + }, + "topTokens": None, + "textRange": {"start": 448, "end": 455}, + }, + { + "generatedToken": { + "token": "\u2581okay", + "logprob": -0.7236325740814209, + "raw_logprob": -0.7236325740814209, + }, + "topTokens": None, + "textRange": {"start": 455, "end": 460}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -1.1485638618469238, + "raw_logprob": -1.1485638618469238, + }, + "topTokens": None, + "textRange": {"start": 460, "end": 461}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -1.3378857374191284, + "raw_logprob": -1.3378857374191284, + }, + "topTokens": None, + "textRange": {"start": 461, "end": 462}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.00016985881666187197, + "raw_logprob": -0.00016985881666187197, + }, + "topTokens": None, + "textRange": {"start": 462, "end": 463}, + }, + { + "generatedToken": { + "token": "\u2581It's\u2581important\u2581to", + "logprob": -3.5227854251861572, + "raw_logprob": -3.5227854251861572, + }, + "topTokens": None, + "textRange": {"start": 463, "end": 480}, + }, + { + "generatedToken": { + "token": "\u2581have", + "logprob": -2.9167816638946533, + "raw_logprob": -2.9167816638946533, + }, + "topTokens": None, + "textRange": {"start": 480, "end": 485}, + }, + { + "generatedToken": { + "token": "\u2581courage", + "logprob": -5.581697940826416, + "raw_logprob": -5.581697940826416, + }, + "topTokens": None, + "textRange": {"start": 485, "end": 493}, + }, + { + "generatedToken": { + "token": "\u2581when\u2581you're", + "logprob": -4.5586161613464355, + "raw_logprob": -4.5586161613464355, + }, + "topTokens": None, + "textRange": {"start": 493, "end": 505}, + }, + { + "generatedToken": { + "token": "\u2581a\u2581leader", + "logprob": -0.26272106170654297, + "raw_logprob": -0.26272106170654297, + }, + "topTokens": None, + "textRange": {"start": 505, "end": 514}, + }, + { + "generatedToken": { + "token": 
".", + "logprob": -0.3965468406677246, + "raw_logprob": -0.3965468406677246, + }, + "topTokens": None, + "textRange": {"start": 514, "end": 515}, + }, + { + "generatedToken": { + "token": "\u2581This\u2581means", + "logprob": -2.841196298599243, + "raw_logprob": -2.841196298599243, + }, + "topTokens": None, + "textRange": {"start": 515, "end": 526}, + }, + { + "generatedToken": { + "token": "\u2581being", + "logprob": -0.4315812587738037, + "raw_logprob": -0.4315812587738037, + }, + "topTokens": None, + "textRange": {"start": 526, "end": 532}, + }, + { + "generatedToken": { + "token": "\u2581willing\u2581to", + "logprob": -0.03861286863684654, + "raw_logprob": -0.03861286863684654, + }, + "topTokens": None, + "textRange": {"start": 532, "end": 543}, + }, + { + "generatedToken": { + "token": "\u2581think", + "logprob": -7.899557113647461, + "raw_logprob": -7.899557113647461, + }, + "topTokens": None, + "textRange": {"start": 543, "end": 549}, + }, + { + "generatedToken": { + "token": "\u2581critically", + "logprob": -0.6595878601074219, + "raw_logprob": -0.6595878601074219, + }, + "topTokens": None, + "textRange": {"start": 549, "end": 560}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -1.2396876811981201, + "raw_logprob": -1.2396876811981201, + }, + "topTokens": None, + "textRange": {"start": 560, "end": 561}, + }, + { + "generatedToken": { + "token": "\u2581reflect", + "logprob": -6.496954917907715, + "raw_logprob": -6.496954917907715, + }, + "topTokens": None, + "textRange": {"start": 561, "end": 569}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.3813382685184479, + "raw_logprob": -0.3813382685184479, + }, + "topTokens": None, + "textRange": {"start": 569, "end": 570}, + }, + { + "generatedToken": { + "token": "\u2581learn\u2581from", + "logprob": -5.863975524902344, + "raw_logprob": -5.863975524902344, + }, + "topTokens": None, + "textRange": {"start": 570, "end": 581}, + }, + { + "generatedToken": { + "token": "\u2581mistakes", 
+ "logprob": -1.1053953170776367, + "raw_logprob": -1.1053953170776367, + }, + "topTokens": None, + "textRange": {"start": 581, "end": 590}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.010977472178637981, + "raw_logprob": -0.010977472178637981, + }, + "topTokens": None, + "textRange": {"start": 590, "end": 591}, + }, + { + "generatedToken": { + "token": "\u2581and", + "logprob": -0.5951434373855591, + "raw_logprob": -0.5951434373855591, + }, + "topTokens": None, + "textRange": {"start": 591, "end": 595}, + }, + { + "generatedToken": { + "token": "\u2581take\u2581action", + "logprob": -4.118521690368652, + "raw_logprob": -4.118521690368652, + }, + "topTokens": None, + "textRange": {"start": 595, "end": 607}, + }, + { + "generatedToken": { + "token": "\u2581steps", + "logprob": -8.071130752563477, + "raw_logprob": -8.071130752563477, + }, + "topTokens": None, + "textRange": {"start": 607, "end": 613}, + }, + { + "generatedToken": { + "token": "\u2581moving\u2581forward", + "logprob": -5.662147045135498, + "raw_logprob": -5.662147045135498, + }, + "topTokens": None, + "textRange": {"start": 613, "end": 628}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.03737432509660721, + "raw_logprob": -0.03737432509660721, + }, + "topTokens": None, + "textRange": {"start": 628, "end": 629}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -1.0259989500045776, + "raw_logprob": -1.0259989500045776, + }, + "topTokens": None, + "textRange": {"start": 629, "end": 630}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.0006999903125688434, + "raw_logprob": -0.0006999903125688434, + }, + "topTokens": None, + "textRange": {"start": 630, "end": 631}, + }, + { + "generatedToken": { + "token": "\u2581There\u2581are\u2581three", + "logprob": -5.931296348571777, + "raw_logprob": -5.931296348571777, + }, + "topTokens": None, + "textRange": {"start": 631, "end": 646}, + }, + { + "generatedToken": { + "token": 
"\u2581steps", + "logprob": -2.7536213397979736, + "raw_logprob": -2.7536213397979736, + }, + "topTokens": None, + "textRange": {"start": 646, "end": 652}, + }, + { + "generatedToken": { + "token": "\u2581that\u2581can\u2581help\u2581you", + "logprob": -2.3474459648132324, + "raw_logprob": -2.3474459648132324, + }, + "topTokens": None, + "textRange": {"start": 652, "end": 670}, + }, + { + "generatedToken": { + "token": "\u2581grow", + "logprob": -7.027171611785889, + "raw_logprob": -7.027171611785889, + }, + "topTokens": None, + "textRange": {"start": 670, "end": 675}, + }, + { + "generatedToken": { + "token": "\u2581as\u2581a\u2581leader", + "logprob": -0.40542012453079224, + "raw_logprob": -0.40542012453079224, + }, + "topTokens": None, + "textRange": {"start": 675, "end": 687}, + }, + { + "generatedToken": { + "token": "\u2581and\u2581make", + "logprob": -0.7026352882385254, + "raw_logprob": -0.7026352882385254, + }, + "topTokens": None, + "textRange": {"start": 687, "end": 696}, + }, + { + "generatedToken": { + "token": "\u2581better", + "logprob": -2.1509532928466797, + "raw_logprob": -2.1509532928466797, + }, + "topTokens": None, + "textRange": {"start": 696, "end": 703}, + }, + { + "generatedToken": { + "token": "\u2581business\u2581decisions", + "logprob": -0.24822193384170532, + "raw_logprob": -0.24822193384170532, + }, + "topTokens": None, + "textRange": {"start": 703, "end": 722}, + }, + { + "generatedToken": { + "token": ":", + "logprob": -0.46704334020614624, + "raw_logprob": -0.46704334020614624, + }, + "topTokens": None, + "textRange": {"start": 722, "end": 723}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.02775048278272152, + "raw_logprob": -0.02775048278272152, + }, + "topTokens": None, + "textRange": {"start": 723, "end": 724}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.0020361661445349455, + "raw_logprob": -0.0020361661445349455, + }, + "topTokens": None, + "textRange": {"start": 724, 
"end": 725}, + }, + { + "generatedToken": { + "token": "\u2581Step", + "logprob": -1.442288875579834, + "raw_logprob": -1.442288875579834, + }, + "topTokens": None, + "textRange": {"start": 725, "end": 729}, + }, + { + "generatedToken": { + "token": "\u2581", + "logprob": -0.05165497958660126, + "raw_logprob": -0.05165497958660126, + }, + "topTokens": None, + "textRange": {"start": 729, "end": 730}, + }, + { + "generatedToken": { + "token": "1", + "logprob": -4.792098479811102e-05, + "raw_logprob": -4.792098479811102e-05, + }, + "topTokens": None, + "textRange": {"start": 730, "end": 731}, + }, + { + "generatedToken": { + "token": ":", + "logprob": -0.02608294039964676, + "raw_logprob": -0.02608294039964676, + }, + "topTokens": None, + "textRange": {"start": 731, "end": 732}, + }, + { + "generatedToken": { + "token": "\u2581Gather", + "logprob": -3.0909531116485596, + "raw_logprob": -3.0909531116485596, + }, + "topTokens": None, + "textRange": {"start": 732, "end": 739}, + }, + { + "generatedToken": { + "token": "\u2581information", + "logprob": -0.8507784605026245, + "raw_logprob": -0.8507784605026245, + }, + "topTokens": None, + "textRange": {"start": 739, "end": 751}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.6048281788825989, + "raw_logprob": -0.6048281788825989, + }, + "topTokens": None, + "textRange": {"start": 751, "end": 752}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.01351175270974636, + "raw_logprob": -0.01351175270974636, + }, + "topTokens": None, + "textRange": {"start": 752, "end": 753}, + }, + { + "generatedToken": { + "token": "\u2581The\u2581first\u2581step", + "logprob": -2.7363672256469727, + "raw_logprob": -2.7363672256469727, + }, + "topTokens": None, + "textRange": {"start": 753, "end": 767}, + }, + { + "generatedToken": { + "token": "\u2581to", + "logprob": -0.1339748501777649, + "raw_logprob": -0.1339748501777649, + }, + "topTokens": None, + "textRange": {"start": 767, "end": 770}, 
+ }, + { + "generatedToken": { + "token": "\u2581making", + "logprob": -0.3207220137119293, + "raw_logprob": -0.3207220137119293, + }, + "topTokens": None, + "textRange": {"start": 770, "end": 777}, + }, + { + "generatedToken": { + "token": "\u2581a\u2581good", + "logprob": -0.6057114005088806, + "raw_logprob": -0.6057114005088806, + }, + "topTokens": None, + "textRange": {"start": 777, "end": 784}, + }, + { + "generatedToken": { + "token": "\u2581decision", + "logprob": -0.030523210763931274, + "raw_logprob": -0.030523210763931274, + }, + "topTokens": None, + "textRange": {"start": 784, "end": 793}, + }, + { + "generatedToken": { + "token": "\u2581is\u2581to", + "logprob": -3.0425467491149902, + "raw_logprob": -3.0425467491149902, + }, + "topTokens": None, + "textRange": {"start": 793, "end": 799}, + }, + { + "generatedToken": { + "token": "\u2581make\u2581sure\u2581that\u2581you\u2581have", + "logprob": -0.7047816514968872, + "raw_logprob": -0.7047816514968872, + }, + "topTokens": None, + "textRange": {"start": 799, "end": 823}, + }, + { + "generatedToken": { + "token": "\u2581all\u2581of\u2581the", + "logprob": -1.9955559968948364, + "raw_logprob": -1.9955559968948364, + }, + "topTokens": None, + "textRange": {"start": 823, "end": 834}, + }, + { + "generatedToken": { + "token": "\u2581facts", + "logprob": -2.409013271331787, + "raw_logprob": -2.409013271331787, + }, + "topTokens": None, + "textRange": {"start": 834, "end": 840}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.2631763517856598, + "raw_logprob": -0.2631763517856598, + }, + "topTokens": None, + "textRange": {"start": 840, "end": 841}, + }, + { + "generatedToken": { + "token": "\u2581It's\u2581important\u2581to", + "logprob": -4.5646491050720215, + "raw_logprob": -4.5646491050720215, + }, + "topTokens": None, + "textRange": {"start": 841, "end": 859}, + }, + { + "generatedToken": { + "token": "\u2581know\u2581what", + "logprob": -6.077958106994629, + "raw_logprob": -6.077958106994629, 
+ }, + "topTokens": None, + "textRange": {"start": 859, "end": 869}, + }, + { + "generatedToken": { + "token": "\u2581information", + "logprob": -2.0120184421539307, + "raw_logprob": -2.0120184421539307, + }, + "topTokens": None, + "textRange": {"start": 869, "end": 881}, + }, + { + "generatedToken": { + "token": "\u2581you\u2581need", + "logprob": -1.7770088911056519, + "raw_logprob": -1.7770088911056519, + }, + "topTokens": None, + "textRange": {"start": 881, "end": 890}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.4962013363838196, + "raw_logprob": -0.4962013363838196, + }, + "topTokens": None, + "textRange": {"start": 890, "end": 891}, + }, + { + "generatedToken": { + "token": "\u2581and", + "logprob": -0.8423260450363159, + "raw_logprob": -0.8423260450363159, + }, + "topTokens": None, + "textRange": {"start": 891, "end": 895}, + }, + { + "generatedToken": { + "token": "\u2581from\u2581where", + "logprob": -8.261597633361816, + "raw_logprob": -8.261597633361816, + }, + "topTokens": None, + "textRange": {"start": 895, "end": 906}, + }, + { + "generatedToken": { + "token": "\u2581to\u2581get\u2581it", + "logprob": -0.985969066619873, + "raw_logprob": -0.985969066619873, + }, + "topTokens": None, + "textRange": {"start": 906, "end": 916}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.0048598977737128735, + "raw_logprob": -0.0048598977737128735, + }, + "topTokens": None, + "textRange": {"start": 916, "end": 917}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.5743589401245117, + "raw_logprob": -0.5743589401245117, + }, + "topTokens": None, + "textRange": {"start": 917, "end": 918}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.000593962671700865, + "raw_logprob": -0.000593962671700865, + }, + "topTokens": None, + "textRange": {"start": 918, "end": 919}, + }, + { + "generatedToken": { + "token": "\u2581You\u2581can", + "logprob": -4.267513275146484, + "raw_logprob": 
-4.267513275146484, + }, + "topTokens": None, + "textRange": {"start": 919, "end": 926}, + }, + { + "generatedToken": { + "token": "\u2581gather", + "logprob": -0.007923126220703125, + "raw_logprob": -0.007923126220703125, + }, + "topTokens": None, + "textRange": {"start": 926, "end": 933}, + }, + { + "generatedToken": { + "token": "\u2581information", + "logprob": -0.3179577887058258, + "raw_logprob": -0.3179577887058258, + }, + "topTokens": None, + "textRange": {"start": 933, "end": 945}, + }, + { + "generatedToken": { + "token": "\u2581by\u2581doing", + "logprob": -5.132864952087402, + "raw_logprob": -5.132864952087402, + }, + "topTokens": None, + "textRange": {"start": 945, "end": 954}, + }, + { + "generatedToken": { + "token": "\u2581things\u2581like", + "logprob": -2.202630043029785, + "raw_logprob": -2.202630043029785, + }, + "topTokens": None, + "textRange": {"start": 954, "end": 966}, + }, + { + "generatedToken": { + "token": "\u2581reading", + "logprob": -3.232940196990967, + "raw_logprob": -3.232940196990967, + }, + "topTokens": None, + "textRange": {"start": 966, "end": 974}, + }, + { + "generatedToken": { + "token": "\u2581reports", + "logprob": -0.329463928937912, + "raw_logprob": -0.329463928937912, + }, + "topTokens": None, + "textRange": {"start": 974, "end": 982}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.002441998338326812, + "raw_logprob": -0.002441998338326812, + }, + "topTokens": None, + "textRange": {"start": 982, "end": 983}, + }, + { + "generatedToken": { + "token": "\u2581talking\u2581to", + "logprob": -0.12298407405614853, + "raw_logprob": -0.12298407405614853, + }, + "topTokens": None, + "textRange": {"start": 983, "end": 994}, + }, + { + "generatedToken": { + "token": "\u2581stakeholders", + "logprob": -2.3864426612854004, + "raw_logprob": -2.3864426612854004, + }, + "topTokens": None, + "textRange": {"start": 994, "end": 1007}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.012393603101372719, + 
"raw_logprob": -0.012393603101372719, + }, + "topTokens": None, + "textRange": {"start": 1007, "end": 1008}, + }, + { + "generatedToken": { + "token": "\u2581and", + "logprob": -0.1544899344444275, + "raw_logprob": -0.1544899344444275, + }, + "topTokens": None, + "textRange": {"start": 1008, "end": 1012}, + }, + { + "generatedToken": { + "token": "\u2581conducting\u2581research", + "logprob": -0.731350839138031, + "raw_logprob": -0.731350839138031, + }, + "topTokens": None, + "textRange": {"start": 1012, "end": 1032}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.010276736691594124, + "raw_logprob": -0.010276736691594124, + }, + "topTokens": None, + "textRange": {"start": 1032, "end": 1033}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -1.247491478919983, + "raw_logprob": -1.247491478919983, + }, + "topTokens": None, + "textRange": {"start": 1033, "end": 1034}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -5.7338023907504976e-05, + "raw_logprob": -5.7338023907504976e-05, + }, + "topTokens": None, + "textRange": {"start": 1034, "end": 1035}, + }, + { + "generatedToken": { + "token": "\u2581Step", + "logprob": -0.43501779437065125, + "raw_logprob": -0.43501779437065125, + }, + "topTokens": None, + "textRange": {"start": 1035, "end": 1039}, + }, + { + "generatedToken": { + "token": "\u2581", + "logprob": -1.1920858014491387e-05, + "raw_logprob": -1.1920858014491387e-05, + }, + "topTokens": None, + "textRange": {"start": 1039, "end": 1040}, + }, + { + "generatedToken": { + "token": "2", + "logprob": -0.00016342257731594145, + "raw_logprob": -0.00016342257731594145, + }, + "topTokens": None, + "textRange": {"start": 1040, "end": 1041}, + }, + { + "generatedToken": { + "token": ":", + "logprob": -0.00010644822759786621, + "raw_logprob": -0.00010644822759786621, + }, + "topTokens": None, + "textRange": {"start": 1041, "end": 1042}, + }, + { + "generatedToken": { + "token": "\u2581Analyze", + "logprob": 
-0.15760670602321625, + "raw_logprob": -0.15760670602321625, + }, + "topTokens": None, + "textRange": {"start": 1042, "end": 1050}, + }, + { + "generatedToken": { + "token": "\u2581information", + "logprob": -1.612084984779358, + "raw_logprob": -1.612084984779358, + }, + "topTokens": None, + "textRange": {"start": 1050, "end": 1062}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -9.583967766957358e-05, + "raw_logprob": -9.583967766957358e-05, + }, + "topTokens": None, + "textRange": {"start": 1062, "end": 1063}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.00024685196694917977, + "raw_logprob": -0.00024685196694917977, + }, + "topTokens": None, + "textRange": {"start": 1063, "end": 1064}, + }, + { + "generatedToken": { + "token": "\u2581Once\u2581you've", + "logprob": -2.3116512298583984, + "raw_logprob": -2.3116512298583984, + }, + "topTokens": None, + "textRange": {"start": 1064, "end": 1075}, + }, + { + "generatedToken": { + "token": "\u2581gathered", + "logprob": -0.002062814310193062, + "raw_logprob": -0.002062814310193062, + }, + "topTokens": None, + "textRange": {"start": 1075, "end": 1084}, + }, + { + "generatedToken": { + "token": "\u2581all\u2581of\u2581your", + "logprob": -2.685849666595459, + "raw_logprob": -2.685849666595459, + }, + "topTokens": None, + "textRange": {"start": 1084, "end": 1096}, + }, + { + "generatedToken": { + "token": "\u2581information", + "logprob": -0.003219066886231303, + "raw_logprob": -0.003219066886231303, + }, + "topTokens": None, + "textRange": {"start": 1096, "end": 1108}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -3.361645576660521e-05, + "raw_logprob": -3.361645576660521e-05, + }, + "topTokens": None, + "textRange": {"start": 1108, "end": 1109}, + }, + { + "generatedToken": { + "token": "\u2581you\u2581need\u2581to", + "logprob": -1.4020256996154785, + "raw_logprob": -1.4020256996154785, + }, + "topTokens": None, + "textRange": {"start": 1109, "end": 
1121}, + }, + { + "generatedToken": { + "token": "\u2581take\u2581some\u2581time\u2581to", + "logprob": -2.1766977310180664, + "raw_logprob": -2.1766977310180664, + }, + "topTokens": None, + "textRange": {"start": 1121, "end": 1139}, + }, + { + "generatedToken": { + "token": "\u2581think\u2581about\u2581it", + "logprob": -0.4216986298561096, + "raw_logprob": -0.4216986298561096, + }, + "topTokens": None, + "textRange": {"start": 1139, "end": 1154}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.24139046669006348, + "raw_logprob": -0.24139046669006348, + }, + "topTokens": None, + "textRange": {"start": 1154, "end": 1155}, + }, + { + "generatedToken": { + "token": "\u2581You\u2581need\u2581to", + "logprob": -1.129857063293457, + "raw_logprob": -1.129857063293457, + }, + "topTokens": None, + "textRange": {"start": 1155, "end": 1167}, + }, + { + "generatedToken": { + "token": "\u2581analyze\u2581the", + "logprob": -1.3527189493179321, + "raw_logprob": -1.3527189493179321, + }, + "topTokens": None, + "textRange": {"start": 1167, "end": 1179}, + }, + { + "generatedToken": { + "token": "\u2581data", + "logprob": -1.0173096656799316, + "raw_logprob": -1.0173096656799316, + }, + "topTokens": None, + "textRange": {"start": 1179, "end": 1184}, + }, + { + "generatedToken": { + "token": "\u2581and\u2581identify", + "logprob": -3.182776927947998, + "raw_logprob": -3.182776927947998, + }, + "topTokens": None, + "textRange": {"start": 1184, "end": 1197}, + }, + { + "generatedToken": { + "token": "\u2581patterns", + "logprob": -0.6117339134216309, + "raw_logprob": -0.6117339134216309, + }, + "topTokens": None, + "textRange": {"start": 1197, "end": 1206}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -0.4564504325389862, + "raw_logprob": -0.4564504325389862, + }, + "topTokens": None, + "textRange": {"start": 1206, "end": 1207}, + }, + { + "generatedToken": { + "token": "\u2581trends", + "logprob": -0.0026252351235598326, + "raw_logprob": 
-0.0026252351235598326, + }, + "topTokens": None, + "textRange": {"start": 1207, "end": 1214}, + }, + { + "generatedToken": { + "token": ",", + "logprob": -2.706014311115723e-05, + "raw_logprob": -2.706014311115723e-05, + }, + "topTokens": None, + "textRange": {"start": 1214, "end": 1215}, + }, + { + "generatedToken": { + "token": "\u2581and", + "logprob": -0.16668428480625153, + "raw_logprob": -0.16668428480625153, + }, + "topTokens": None, + "textRange": {"start": 1215, "end": 1219}, + }, + { + "generatedToken": { + "token": "\u2581trends", + "logprob": -2.091916084289551, + "raw_logprob": -2.091916084289551, + }, + "topTokens": None, + "textRange": {"start": 1219, "end": 1226}, + }, + { + "generatedToken": { + "token": "\u2581that", + "logprob": -2.99127197265625, + "raw_logprob": -2.99127197265625, + }, + "topTokens": None, + "textRange": {"start": 1226, "end": 1231}, + }, + { + "generatedToken": { + "token": "\u2581might\u2581not\u2581be", + "logprob": -2.1681160926818848, + "raw_logprob": -2.1681160926818848, + }, + "topTokens": None, + "textRange": {"start": 1231, "end": 1244}, + }, + { + "generatedToken": { + "token": "\u2581immediately", + "logprob": -0.5720977783203125, + "raw_logprob": -0.5720977783203125, + }, + "topTokens": None, + "textRange": {"start": 1244, "end": 1256}, + }, + { + "generatedToken": { + "token": "\u2581obvious", + "logprob": -0.38135844469070435, + "raw_logprob": -0.38135844469070435, + }, + "topTokens": None, + "textRange": {"start": 1256, "end": 1264}, + }, + { + "generatedToken": { + "token": ".", + "logprob": -0.0025424794293940067, + "raw_logprob": -0.0025424794293940067, + }, + "topTokens": None, + "textRange": {"start": 1264, "end": 1265}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.005445053335279226, + "raw_logprob": -0.005445053335279226, + }, + "topTokens": None, + "textRange": {"start": 1265, "end": 1266}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": 
-1.156323378381785e-05, + "raw_logprob": -1.156323378381785e-05, + }, + "topTokens": None, + "textRange": {"start": 1266, "end": 1267}, + }, + { + "generatedToken": { + "token": "\u2581There\u2581are\u2581a\u2581few", + "logprob": -4.2585649490356445, + "raw_logprob": -4.2585649490356445, + }, + "topTokens": None, + "textRange": {"start": 1267, "end": 1282}, + }, + { + "generatedToken": { + "token": "\u2581things", + "logprob": -2.04957914352417, + "raw_logprob": -2.04957914352417, + }, + "topTokens": None, + "textRange": {"start": 1282, "end": 1289}, + }, + { + "generatedToken": { + "token": "\u2581you\u2581should", + "logprob": -1.8114514350891113, + "raw_logprob": -1.8114514350891113, + }, + "topTokens": None, + "textRange": {"start": 1289, "end": 1300}, + }, + { + "generatedToken": { + "token": "\u2581keep\u2581in\u2581mind", + "logprob": -0.2850663959980011, + "raw_logprob": -0.2850663959980011, + }, + "topTokens": None, + "textRange": {"start": 1300, "end": 1313}, + }, + { + "generatedToken": { + "token": "\u2581when\u2581you're", + "logprob": -0.40983426570892334, + "raw_logprob": -0.40983426570892334, + }, + "topTokens": None, + "textRange": {"start": 1313, "end": 1325}, + }, + { + "generatedToken": { + "token": "\u2581analyzing", + "logprob": -0.049553561955690384, + "raw_logprob": -0.049553561955690384, + }, + "topTokens": None, + "textRange": {"start": 1325, "end": 1335}, + }, + { + "generatedToken": { + "token": "\u2581information", + "logprob": -0.0341101810336113, + "raw_logprob": -0.0341101810336113, + }, + "topTokens": None, + "textRange": {"start": 1335, "end": 1347}, + }, + { + "generatedToken": { + "token": ":", + "logprob": -0.4348779022693634, + "raw_logprob": -0.4348779022693634, + }, + "topTokens": None, + "textRange": {"start": 1347, "end": 1348}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.006193492095917463, + "raw_logprob": -0.006193492095917463, + }, + "topTokens": None, + "textRange": {"start": 1348, "end": 
1349}, + }, + { + "generatedToken": { + "token": "<|newline|>", + "logprob": -0.000358159770257771, + "raw_logprob": -0.000358159770257771, + }, + "topTokens": None, + "textRange": {"start": 1349, "end": 1350}, + }, + { + "generatedToken": { + "token": "\u2581*", + "logprob": -1.0053796768188477, + "raw_logprob": -1.0053796768188477, + }, + "topTokens": None, + "textRange": {"start": 1350, "end": 1351}, + }, + { + "generatedToken": { + "token": "\u2581Identify", + "logprob": -4.100193977355957, + "raw_logprob": -4.100193977355957, + }, + "topTokens": None, + "textRange": {"start": 1351, "end": 1360}, + }, + { + "generatedToken": { + "token": "\u2581the", + "logprob": -1.141700029373169, + "raw_logprob": -1.141700029373169, + }, + "topTokens": None, + "textRange": {"start": 1360, "end": 1364}, + }, + { + "generatedToken": { + "token": "\u2581key\u2581points", + "logprob": -0.9346644282341003, + "raw_logprob": -0.9346644282341003, + }, + "topTokens": None, + "textRange": {"start": 1364, "end": 1375}, + }, + { + "generatedToken": { + "token": ":", + "logprob": -0.29478567838668823, + "raw_logprob": -0.29478567838668823, + }, + "topTokens": None, + "textRange": {"start": 1375, "end": 1376}, + }, + { + "generatedToken": { + "token": "\u2581What\u2581are\u2581the", + "logprob": -0.2456199824810028, + "raw_logprob": -0.2456199824810028, + }, + "topTokens": None, + "textRange": {"start": 1376, "end": 1389}, + }, + { + "generatedToken": { + "token": "\u2581key", + "logprob": -0.8171483278274536, + "raw_logprob": -0.8171483278274536, + }, + "topTokens": None, + "textRange": {"start": 1389, "end": 1393}, + }, + { + "generatedToken": { + "token": "\u2581takeaways", + "logprob": -0.5598645806312561, + "raw_logprob": -0.5598645806312561, + }, + "topTokens": None, + "textRange": {"start": 1393, "end": 1403}, + }, + { + "generatedToken": { + "token": "\u2581from", + "logprob": -1.6096564531326294, + "raw_logprob": -1.6096564531326294, + }, + "topTokens": None, + "textRange": 
{"start": 1403, "end": 1408}, + }, + { + "generatedToken": { + "token": "\u2581this\u2581information", + "logprob": -1.101968765258789, + "raw_logprob": -1.101968765258789, + }, + "topTokens": None, + "textRange": {"start": 1408, "end": 1425}, + }, + { + "generatedToken": { + "token": "?", + "logprob": -0.0003685271949507296, + "raw_logprob": -0.0003685271949507296, + }, + "topTokens": None, + "textRange": {"start": 1425, "end": 1426}, + }, + { + "generatedToken": { + "token": "\u2581What", + "logprob": -2.42529034614563, + "raw_logprob": -2.42529034614563, + }, + "topTokens": None, + "textRange": {"start": 1426, "end": 1431}, + }, + ], + }, + "finishReason": {"reason": "length", "length": 200}, + } + ], + }, + ], + "cohere.command-text-v14::Write me a blog about making strong business decisions as a leader.": [ + {}, + { + "generations": [ + { + "id": "7449e005-a317-42ab-8e47-6bf0fa119088", + "text": " As a leader, one of the most important things you can do is make strong business decisions. Your choices can make or break your company, so it's essential to take the time to think things through and consider all your options. Here are a few tips for making sound business decisions:\n\n1. Do your research. Before making any decision, it's important to gather as much information as possible. This means talking to your team, looking at data and trends, and considering all of your options. The more information you have, the better equipped you'll be to make a decision.\n\n2. Consider the consequences. Every decision has consequences, so it's important to think about what might happen as a result of your choice. What will the impact be on your team, your company, and your customers? It's also important to think about how your decision might affect your own career and personal life.\n\n3. Seek advice. 
If you're struggling to make a decision, it", + } + ], + "id": "4e3ebf15-98d2-4aaf-a2da-61d0e262e862", + "prompt": "Write me a blog about making strong business decisions as a leader.", + }, + ], +} + +MODEL_PATH_RE = re.compile(r"/model/([^/]+)/invoke") + + +def simple_get(self): + content_len = int(self.headers.get("content-length")) + content = json.loads(self.rfile.read(content_len).decode("utf-8")) + + model = MODEL_PATH_RE.match(self.path).group(1) + prompt = extract_shortened_prompt(content, model) + if not prompt: + self.send_response(500) + self.end_headers() + self.wfile.write("Could not parse prompt.".encode("utf-8")) + return + + headers, response = ({}, "") + for k, v in RESPONSES.items(): + if prompt.startswith(k): + headers, response = v + break + else: # If no matches found + self.send_response(500) + self.end_headers() + self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8")) + return + + # Send response code + self.send_response(200) + + # Send headers + for k, v in headers.items(): + self.send_header(k, v) + self.end_headers() + + # Send response body + self.wfile.write(json.dumps(response).encode("utf-8")) + return + + +def extract_shortened_prompt(content, model): + prompt = content.get("inputText", None) or content.get("prompt", None) + prompt = "::".join((model, prompt)) # Prepend model name to prompt key to keep separate copies + return prompt.lstrip().split("\n")[0] + + +class MockExternalBedrockServer(MockExternalHTTPServer): + # To use this class in a test one needs to start and stop this server + # before and after making requests to the test app that makes the external + # calls. 
+ + def __init__(self, handler=simple_get, port=None, *args, **kwargs): + super(MockExternalBedrockServer, self).__init__(handler=handler, port=port, *args, **kwargs) + + +if __name__ == "__main__": + with MockExternalBedrockServer() as server: + print("MockExternalBedrockServer serving on port %s" % str(server.port)) + while True: + pass # Serve forever diff --git a/tests/mlmodel_bedrock/conftest.py b/tests/mlmodel_bedrock/conftest.py new file mode 100644 index 0000000000..0464c2e079 --- /dev/null +++ b/tests/mlmodel_bedrock/conftest.py @@ -0,0 +1,136 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os + +import pytest +from _mock_external_bedrock_server import ( + MockExternalBedrockServer, + extract_shortened_prompt, +) +from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 + collector_agent_registration_fixture, + collector_available_fixture, +) + +from newrelic.common.object_wrapper import wrap_function_wrapper + +_default_settings = { + "transaction_tracer.explain_threshold": 0.0, + "transaction_tracer.transaction_threshold": 0.0, + "transaction_tracer.stack_trace_threshold": 0.0, + "debug.log_data_collector_payloads": True, + "debug.record_transaction_failure": True, + "ml_insights_event.enabled": True, +} +collector_agent_registration = collector_agent_registration_fixture( + app_name="Python Agent Test (mlmodel_bedrock)", + default_settings=_default_settings, + linked_applications=["Python Agent Test (mlmodel_bedrock)"], +) + +BEDROCK_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "bedrock_audit.log") +BEDROCK_AUDIT_LOG_CONTENTS = {} + + +@pytest.fixture(autouse=True, scope="session") +def bedrock_server(): + """ + This fixture will either create a mocked backend for testing purposes, or will + set up an audit log file to log responses of the real Bedrock backend to a file. + The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 as + an environment variable to run using the real Bedrock backend. 
(Default: mocking) + """ + import boto3 + + from newrelic.core.config import _environ_as_bool + + if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): + # Use mocked Bedrock backend and prerecorded responses + with MockExternalBedrockServer() as server: + client = boto3.client( + "bedrock-runtime", + "us-east-1", + endpoint_url="http://localhost:%d" % server.port, + aws_access_key_id="NOT-A-REAL-SECRET", + aws_secret_access_key="NOT-A-REAL-SECRET", + ) + + yield client + else: + # Use real Bedrock backend and record responses + assert ( + os.environ["AWS_ACCESS_KEY_ID"] and os.environ["AWS_SECRET_ACCESS_KEY"] + ), "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required." + + # Construct real client + client = boto3.client( + "bedrock-runtime", + "us-east-1", + ) + + # Apply function wrappers to record data + wrap_function_wrapper( + "botocore.client", "BaseClient._make_api_call", wrap_botocore_client_BaseClient__make_api_call + ) + yield client # Run tests + + # Write responses to audit log + with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: + json.dump(BEDROCK_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4) + + +# Intercept outgoing requests and log to file for mocking +RECORDED_HEADERS = set(["x-request-id", "contentType"]) + + +def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwargs): + from io import BytesIO + + from botocore.response import StreamingBody + + params = bind_make_api_call_params(*args, **kwargs) + if not params: + return wrapped(*args, **kwargs) + + body = json.loads(params["body"]) + model = params["modelId"] + prompt = extract_shortened_prompt(body, model) + + # Send request + result = wrapped(*args, **kwargs) + + # Intercept body data, and replace stream + streamed_body = result["body"].read() + result["body"] = StreamingBody(BytesIO(streamed_body), len(streamed_body)) + + # Clean up data + data = json.loads(streamed_body.decode("utf-8")) + headers = 
dict(result["ResponseMetadata"].items()) + headers["contentType"] = result["contentType"] + headers = dict( + filter( + lambda k: k[0].lower() in RECORDED_HEADERS or k[0].lower().startswith("x-ratelimit"), + headers.items(), + ) + ) + + # Log response + BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log + return result + + +def bind_make_api_call_params(operation_name, api_params): + return api_params diff --git a/tests/mlmodel_bedrock/test_chat_completion.py b/tests/mlmodel_bedrock/test_chat_completion.py new file mode 100644 index 0000000000..ad33001c49 --- /dev/null +++ b/tests/mlmodel_bedrock/test_chat_completion.py @@ -0,0 +1,40 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import pytest + +_test_bedrock_chat_completion_prompt = "Write me a blog about making strong business decisions as a leader." 
+ + +@pytest.mark.parametrize( + "model_id,payload", + [ + ("amazon.titan-text-express-v1", {"inputText": "Command: %s\n\nBlog:"}), + ("anthropic.claude-instant-v1", {"prompt": "Human: %s\n\nAssistant:", "max_tokens_to_sample": 500}), + ("ai21.j2-mid-v1", {"prompt": "%s", "maxTokens": 200}), + ("cohere.command-text-v14", {"prompt": "%s", "max_tokens": 200, "temperature": 0.75}), + ], +) +def test_bedrock_chat_completion(bedrock_server, model_id, payload): + body = json.dumps(payload) % _test_bedrock_chat_completion_prompt + response = bedrock_server.invoke_model( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + response_body = json.loads(response.get("body").read()) + assert response_body diff --git a/tox.ini b/tox.ini index f64eb82b14..8862bcc17c 100644 --- a/tox.ini +++ b/tox.ini @@ -140,6 +140,7 @@ envlist = python-framework_starlette-{py37,py38}-starlette{002001}, python-framework_starlette-{py37,py38,py39,py310,py311,pypy38}-starlettelatest, python-framework_strawberry-{py37,py38,py39,py310,py311}-strawberrylatest, + python-mlmodel_bedrock-{py37,py38,py39,py310,py311,pypy38}, python-logger_logging-{py27,py37,py38,py39,py310,py311,pypy27,pypy38}, python-logger_loguru-{py37,py38,py39,py310,py311,pypy38}-logurulatest, python-logger_loguru-py39-loguru{06,05}, @@ -343,6 +344,7 @@ deps = framework_tornado: pycurl framework_tornado-tornadolatest: tornado framework_tornado-tornadomaster: https://github.com/tornadoweb/tornado/archive/master.zip + mlmodel_bedrock: boto3 logger_loguru-logurulatest: loguru logger_loguru-loguru06: loguru<0.7 logger_loguru-loguru05: loguru<0.6 @@ -462,6 +464,7 @@ changedir = framework_starlette: tests/framework_starlette framework_strawberry: tests/framework_strawberry framework_tornado: tests/framework_tornado + mlmodel_bedrock: tests/mlmodel_bedrock logger_logging: tests/logger_logging logger_loguru: tests/logger_loguru logger_structlog: tests/logger_structlog From 
b1ccfc156061b1c1a317b557bed354f2be77d28d Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Thu, 2 Nov 2023 08:58:32 -0700 Subject: [PATCH 02/16] Bedrock Sync Chat Completion Instrumentation (#953) * Add AWS Bedrock testing infrastructure * Squashed commit of the following: commit 2834663794c649124052e510c1c9557a830c060a Author: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon Oct 9 17:42:05 2023 -0700 OpenAI Mock Backend (#929) * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Pin flask version for flask restx tests. (#931) * Ignore new redis methods. (#932) Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> * Remove approved paths * Update CI Image (#930) * Update available python versions in CI * Update makefile with overrides * Fix default branch detection for arm builds --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Only get package version once (#928) * Only get package version once * Add disconnect method * Add disconnect method --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add datalib dependency for embedding testing. * Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. * [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. 
* Clean mock server to depend on http server * Linting * Remove approved paths * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Add datalib dependency for embedding testing. --------- Co-authored-by: Uma Annamalai Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: TimPansino Co-authored-by: Hannah Stepanek Co-authored-by: mergify[bot] commit db63d4598c94048986c0e00ebb2cd8827100b54c Author: Uma Annamalai Date: Mon Oct 2 15:31:38 2023 -0700 Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. * [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Squashed commit of the following: commit 182c7a8c8a91e2d0f234f7ed7d4a14a2422c8342 Author: Uma Annamalai Date: Fri Oct 13 10:12:55 2023 -0700 Add request/ response IDs. commit f6d13f822c22d2039ec32be86b2c54f9dc3de1c9 Author: Uma Annamalai Date: Thu Oct 12 13:23:39 2023 -0700 Test cleanup. commit d0576631d009e481bd5887a3243aac99b097d823 Author: Uma Annamalai Date: Tue Oct 10 10:23:00 2023 -0700 Remove commented code. commit dd29433e719482babbe5c724e7330b1f6324abd7 Author: Uma Annamalai Date: Tue Oct 10 10:19:01 2023 -0700 Add openai sync instrumentation. commit 2834663794c649124052e510c1c9557a830c060a Author: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon Oct 9 17:42:05 2023 -0700 OpenAI Mock Backend (#929) * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Pin flask version for flask restx tests. (#931) * Ignore new redis methods. 
(#932) Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> * Remove approved paths * Update CI Image (#930) * Update available python versions in CI * Update makefile with overrides * Fix default branch detection for arm builds --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Only get package version once (#928) * Only get package version once * Add disconnect method * Add disconnect method --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add datalib dependency for embedding testing. * Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. * [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Remove approved paths * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Add datalib dependency for embedding testing. --------- Co-authored-by: Uma Annamalai Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: TimPansino Co-authored-by: Hannah Stepanek Co-authored-by: mergify[bot] commit db63d4598c94048986c0e00ebb2cd8827100b54c Author: Uma Annamalai Date: Mon Oct 2 15:31:38 2023 -0700 Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. 
* [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Cache Package Version Lookups (#946) * Cache _get_package_version * Add Python 2.7 support to get_package_version caching * [Mega-Linter] Apply linters fixes * Bump tests --------- Co-authored-by: SlavaSkvortsov <29122694+SlavaSkvortsov@users.noreply.github.com> Co-authored-by: TimPansino * Fix Redis Generator Methods (#947) * Fix scan_iter for redis * Replace generator methods * Update instance info instrumentation * Remove mistake from uninstrumented methods * Add skip condition to asyncio generator tests * Add skip condition to asyncio generator tests --------- Co-authored-by: Lalleh Rafeei Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * TEMP * Automatic RPM System Updates (#948) * Checkout old action * Adding RPM action * Add dry run * Incorporating action into workflow * Wire secret into custom action * Enable action * Correct action name * Fix syntax * Fix quoting issues * Drop pre-verification. 
Does not work on python * Fix merge artifact * Bedrock titan extraction nearly complete * Cleaning up titan bedrock implementation * TEMP * Tests for bedrock passing Co-authored-by: Lalleh Rafeei * Cleaned up titan testing Co-authored-by: Lalleh Rafeei Co-authored-by: Hannah Stepanek * Parametrized bedrock testing * Add support for AI21-J2 models * Change to dynamic no conversation id events * Drop all openai refs * [Mega-Linter] Apply linters fixes * Adding response_id and response_model * Drop python 3.7 tests for Hypercorn (#954) * Apply suggestions from code review * Remove unused import --------- Co-authored-by: Uma Annamalai Co-authored-by: SlavaSkvortsov <29122694+SlavaSkvortsov@users.noreply.github.com> Co-authored-by: TimPansino Co-authored-by: Lalleh Rafeei Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Lalleh Rafeei Co-authored-by: Hannah Stepanek Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> --- newrelic/hooks/external_botocore.py | 275 ++- .../_mock_external_bedrock_server.py | 1891 +---------------- .../mlmodel_bedrock/_test_chat_completion.py | 135 ++ tests/mlmodel_bedrock/conftest.py | 9 +- tests/mlmodel_bedrock/test_chat_completion.py | 139 +- .../validators/validate_ml_events.py | 3 +- tox.ini | 2 +- 7 files changed, 569 insertions(+), 1885 deletions(-) create mode 100644 tests/mlmodel_bedrock/_test_chat_completion.py diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 7d49fbd031..018df2d320 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -12,15 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from newrelic.api.message_trace import message_trace +import json +import logging +import uuid +from io import BytesIO + +from botocore.response import StreamingBody + from newrelic.api.datastore_trace import datastore_trace from newrelic.api.external_trace import ExternalTrace -from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.api.function_trace import FunctionTrace +from newrelic.api.message_trace import message_trace +from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.api.transaction import current_transaction +from newrelic.common.object_names import callable_name +from newrelic.common.object_wrapper import function_wrapper, wrap_function_wrapper +from newrelic.core.config import global_settings + +_logger = logging.getLogger(__name__) +UNSUPPORTED_MODEL_WARNING_SENT = False def extract_sqs(*args, **kwargs): - queue_value = kwargs.get('QueueUrl', 'Unknown') - return queue_value.rsplit('/', 1)[-1] + queue_value = kwargs.get("QueueUrl", "Unknown") + return queue_value.rsplit("/", 1)[-1] def extract(argument_names, default=None): @@ -40,43 +55,218 @@ def extractor_string(*args, **kwargs): return extractor_list +def create_chat_completion_message_event( + transaction, + app_name, + message_list, + chat_completion_id, + span_id, + trace_id, + request_model, + request_id, + conversation_id, + response_id="", +): + if not transaction: + return + + for index, message in enumerate(message_list): + if response_id: + id_ = "%s-%d" % (response_id, index) # Response ID was set, append message index to it. 
+ else: + id_ = str(uuid.uuid4()) # No response IDs, use random UUID + + chat_completion_message_dict = { + "id": id_, + "appName": app_name, + "conversation_id": conversation_id, + "request_id": request_id, + "span_id": span_id, + "trace_id": trace_id, + "transaction_id": transaction._transaction_id, + "content": message.get("content", ""), + "role": message.get("role"), + "completion_id": chat_completion_id, + "sequence": index, + "response.model": request_model, + "vendor": "bedrock", + "ingest_source": "Python", + } + transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict) + + +def extract_bedrock_titan_model(request_body, response_body): + response_body = json.loads(response_body) + request_body = json.loads(request_body) + + input_tokens = response_body["inputTextTokenCount"] + completion_tokens = sum(result["tokenCount"] for result in response_body["results"]) + total_tokens = input_tokens + completion_tokens + + request_config = request_body.get("textGenerationConfig", {}) + message_list = [{"role": "user", "content": request_body.get("inputText", "")}] + message_list.extend( + {"role": "assistant", "content": result["outputText"]} for result in response_body.get("results", []) + ) + + chat_completion_summary_dict = { + "request.max_tokens": request_config.get("maxTokenCount", ""), + "request.temperature": request_config.get("temperature", ""), + "response.choices.finish_reason": response_body["results"][0]["completionReason"], + "response.usage.completion_tokens": completion_tokens, + "response.usage.prompt_tokens": input_tokens, + "response.usage.total_tokens": total_tokens, + "response.number_of_messages": len(message_list), + } + return message_list, chat_completion_summary_dict + + +def extract_bedrock_ai21_j2_model(request_body, response_body): + response_body = json.loads(response_body) + request_body = json.loads(request_body) + + message_list = [{"role": "user", "content": request_body.get("prompt", "")}] + 
message_list.extend( + {"role": "assistant", "content": result["data"]["text"]} for result in response_body.get("completions", []) + ) + + chat_completion_summary_dict = { + "request.max_tokens": request_body.get("maxTokens", ""), + "request.temperature": request_body.get("temperature", ""), + "response.choices.finish_reason": response_body["completions"][0]["finishReason"]["reason"], + "response.number_of_messages": len(message_list), + "response_id": str(response_body.get("id", "")), + } + return message_list, chat_completion_summary_dict + + +MODEL_EXTRACTORS = { + "amazon.titan": extract_bedrock_titan_model, + "ai21.j2": extract_bedrock_ai21_j2_model, +} + + +@function_wrapper +def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): + # Wrapped function only takes keyword arguments, no need for binding + + transaction = current_transaction() + + if not transaction: + return wrapped(*args, **kwargs) + + # Read and replace request file stream bodies + request_body = kwargs["body"] + if hasattr(request_body, "read"): + request_body = request_body.read() + kwargs["body"] = request_body + + ft_name = callable_name(wrapped) + with FunctionTrace(ft_name) as ft: + response = wrapped(*args, **kwargs) + + if not response: + return response + + # Determine model to be used with extractor + model = kwargs.get("modelId") + if not model: + return response + + # Determine extractor by model type + for extractor_name, extractor in MODEL_EXTRACTORS.items(): + if model.startswith(extractor_name): + break + else: + # Model was not found in extractor list + global UNSUPPORTED_MODEL_WARNING_SENT + if not UNSUPPORTED_MODEL_WARNING_SENT: + # Only send warning once to avoid spam + _logger.warning( + "Unsupported Amazon Bedrock model in use (%s). 
Upgrade to a newer version of the agent, and contact New Relic support if the issue persists.", + model, + ) + UNSUPPORTED_MODEL_WARNING_SENT = True + + return response + + # Read and replace response streaming bodies + response_body = response["body"].read() + response["body"] = StreamingBody(BytesIO(response_body), len(response_body)) + + custom_attrs_dict = transaction._custom_params + conversation_id = custom_attrs_dict.get("conversation_id", "") + + chat_completion_id = str(uuid.uuid4()) + available_metadata = get_trace_linking_metadata() + span_id = available_metadata.get("span.id", "") + trace_id = available_metadata.get("trace.id", "") + + response_headers = response["ResponseMetadata"]["HTTPHeaders"] + request_id = response_headers.get("x-amzn-requestid", "") + settings = transaction.settings if transaction.settings is not None else global_settings() + + message_list, chat_completion_summary_dict = extractor(request_body, response_body) + response_id = chat_completion_summary_dict.get("response_id", "") + chat_completion_summary_dict.update( + { + "vendor": "bedrock", + "ingest_source": "Python", + "api_key_last_four_digits": instance._request_signer._credentials.access_key[-4:], + "id": chat_completion_id, + "appName": settings.app_name, + "conversation_id": conversation_id, + "span_id": span_id, + "trace_id": trace_id, + "transaction_id": transaction._transaction_id, + "request_id": request_id, + "duration": ft.duration, + "request.model": model, + "response.model": model, # Duplicate data required by the UI + } + ) + + transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict) + + create_chat_completion_message_event( + transaction=transaction, + app_name=settings.app_name, + message_list=message_list, + chat_completion_id=chat_completion_id, + span_id=span_id, + trace_id=trace_id, + request_model=model, + request_id=request_id, + conversation_id=conversation_id, + response_id=response_id, + ) + + return response + + 
CUSTOM_TRACE_POINTS = { - ('sns', 'publish'): message_trace( - 'SNS', 'Produce', 'Topic', - extract(('TopicArn', 'TargetArn'), 'PhoneNumber')), - ('dynamodb', 'put_item'): datastore_trace( - 'DynamoDB', extract('TableName'), 'put_item'), - ('dynamodb', 'get_item'): datastore_trace( - 'DynamoDB', extract('TableName'), 'get_item'), - ('dynamodb', 'update_item'): datastore_trace( - 'DynamoDB', extract('TableName'), 'update_item'), - ('dynamodb', 'delete_item'): datastore_trace( - 'DynamoDB', extract('TableName'), 'delete_item'), - ('dynamodb', 'create_table'): datastore_trace( - 'DynamoDB', extract('TableName'), 'create_table'), - ('dynamodb', 'delete_table'): datastore_trace( - 'DynamoDB', extract('TableName'), 'delete_table'), - ('dynamodb', 'query'): datastore_trace( - 'DynamoDB', extract('TableName'), 'query'), - ('dynamodb', 'scan'): datastore_trace( - 'DynamoDB', extract('TableName'), 'scan'), - ('sqs', 'send_message'): message_trace( - 'SQS', 'Produce', 'Queue', extract_sqs), - ('sqs', 'send_message_batch'): message_trace( - 'SQS', 'Produce', 'Queue', extract_sqs), - ('sqs', 'receive_message'): message_trace( - 'SQS', 'Consume', 'Queue', extract_sqs), + ("sns", "publish"): message_trace("SNS", "Produce", "Topic", extract(("TopicArn", "TargetArn"), "PhoneNumber")), + ("dynamodb", "put_item"): datastore_trace("DynamoDB", extract("TableName"), "put_item"), + ("dynamodb", "get_item"): datastore_trace("DynamoDB", extract("TableName"), "get_item"), + ("dynamodb", "update_item"): datastore_trace("DynamoDB", extract("TableName"), "update_item"), + ("dynamodb", "delete_item"): datastore_trace("DynamoDB", extract("TableName"), "delete_item"), + ("dynamodb", "create_table"): datastore_trace("DynamoDB", extract("TableName"), "create_table"), + ("dynamodb", "delete_table"): datastore_trace("DynamoDB", extract("TableName"), "delete_table"), + ("dynamodb", "query"): datastore_trace("DynamoDB", extract("TableName"), "query"), + ("dynamodb", "scan"): datastore_trace("DynamoDB", 
extract("TableName"), "scan"), + ("sqs", "send_message"): message_trace("SQS", "Produce", "Queue", extract_sqs), + ("sqs", "send_message_batch"): message_trace("SQS", "Produce", "Queue", extract_sqs), + ("sqs", "receive_message"): message_trace("SQS", "Consume", "Queue", extract_sqs), + ("bedrock-runtime", "invoke_model"): wrap_bedrock_runtime_invoke_model, } -def bind__create_api_method(py_operation_name, operation_name, service_model, - *args, **kwargs): +def bind__create_api_method(py_operation_name, operation_name, service_model, *args, **kwargs): return (py_operation_name, service_model) def _nr_clientcreator__create_api_method_(wrapped, instance, args, kwargs): - (py_operation_name, service_model) = \ - bind__create_api_method(*args, **kwargs) + (py_operation_name, service_model) = bind__create_api_method(*args, **kwargs) service_name = service_model.service_name.lower() tracer = CUSTOM_TRACE_POINTS.get((service_name, py_operation_name)) @@ -95,30 +285,27 @@ def _bind_make_request_params(operation_model, request_dict, *args, **kwargs): def _nr_endpoint_make_request_(wrapped, instance, args, kwargs): operation_model, request_dict = _bind_make_request_params(*args, **kwargs) - url = request_dict.get('url', '') - method = request_dict.get('method', None) - - with ExternalTrace(library='botocore', url=url, method=method, source=wrapped) as trace: + url = request_dict.get("url", "") + method = request_dict.get("method", None) + with ExternalTrace(library="botocore", url=url, method=method, source=wrapped) as trace: try: - trace._add_agent_attribute('aws.operation', operation_model.name) + trace._add_agent_attribute("aws.operation", operation_model.name) except: pass result = wrapped(*args, **kwargs) try: - request_id = result[1]['ResponseMetadata']['RequestId'] - trace._add_agent_attribute('aws.requestId', request_id) + request_id = result[1]["ResponseMetadata"]["RequestId"] + trace._add_agent_attribute("aws.requestId", request_id) except: pass return result def 
instrument_botocore_endpoint(module): - wrap_function_wrapper(module, 'Endpoint.make_request', - _nr_endpoint_make_request_) + wrap_function_wrapper(module, "Endpoint.make_request", _nr_endpoint_make_request_) def instrument_botocore_client(module): - wrap_function_wrapper(module, 'ClientCreator._create_api_method', - _nr_clientcreator__create_api_method_) + wrap_function_wrapper(module, "ClientCreator._create_api_method", _nr_clientcreator__create_api_method_) diff --git a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py index 3d200449bf..de8a91aad3 100644 --- a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py +++ b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py @@ -29,1930 +29,191 @@ # 3) This app runs on a separate thread meaning it won't block the test app. RESPONSES = { - "amazon.titan-text-express-v1::Command: Write me a blog about making strong business decisions as a leader.": [ - {}, + "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"content-type": "application/json", "x-amzn-requestid": "660d4de9-6804-460e-8556-4ab2a019d1e3"}, { - "inputTextTokenCount": 19, + "inputTextTokenCount": 12, "results": [ { - "tokenCount": 128, - "outputText": " Making strong business decisions as a leader requires a combination of strategic thinking, data analysis, and intuition. Here are some tips to help you make informed and effective decisions:\nDefine your goals and vision: Clearly understand your organization's goals and vision, and ensure that all decision-making aligns with these objectives. This will provide a roadmap for your decisions and help you stay focused on the bigger picture.\nGather relevant data and information: Collect and analyze data related to the decision you need to make. Consider multiple sources of information, such as market trends, financial reports, and stakeholder feedback. 
Use data to inform your decision-making process", - "completionReason": "LENGTH", + "tokenCount": 55, + "outputText": "\nUse the formula,\n\u00b0C = (\u00b0F - 32) x 5/9\n= 212 x 5/9\n= 100 degrees Celsius\n212 degrees Fahrenheit is 100 degrees Celsius.", + "completionReason": "FINISH", } ], }, ], - "anthropic.claude-instant-v1::Human: Write me a blog about making strong business decisions as a leader.": [ - {}, - { - "completion": " Here is a draft blog post on making strong business decisions as a leader:\n\nTitle: 5 Tips for Making Strong Business Decisions as a Leader\n\nBeing a leader means that tough business decisions will inevitably land on your desk. How you handle those decisions can have a huge impact on your company's success. Here are some tips to help you make strong, informed choices that move your business in the right direction.\n\n1. Gather all relevant data. Don't make a call until you've examined the issue from every angle. Seek out useful metrics, get feedback from various stakeholders, and look at historical trends and precedents. The more data you have, the clearer the right path will become. \n\n2. Consider both short and long-term implications. While it's important to address immediate needs, don't lose sight of how a decision may impact the future. Will your choice simply solve today's problem or help build sustainable growth? Carefully weigh short-term gains against potential long-term consequences.\n\n3. Trust your instincts but don't decide alone. Your gut feelings are valuable, but they shouldn't be the sole basis for a leadership decision. Consult with your management team and get differing perspectives. Encourage respectful debate to surface any risks or uncertainties that need discussion. \n\n4. Be willing todelaya decisionif youneed moretime.There'snobenefittomakingarushjudgement beforeallfactorshavebeenweighed.It'sbettertoletyourdecision\"bake\"athirdopinionormoredataratherthanpotentiallyregrettingahastycalllater.\n\n5. 
Follow through on the outcome. A good decision means little without effective implementation. Clearly communicate the rationale for your choice and gain organizational buy-in. Then follow up to ensure your solution is executed properly and intended goals are achieved. Are any adjustments needed along the way? \n\nLeaders are entrusted to make the calls that steer a business. With care, research and an open yet discerning approach, you can make decisions that propel your company confidently into the future.", - "stop_reason": "stop_sequence", - }, - ], - "ai21.j2-mid-v1::Write me a blog about making strong business decisions as a leader.": [ - {}, + "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"content-type": "application/json", "x-amzn-requestid": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e"}, { "id": 1234, "prompt": { - "text": "Write me a blog about making strong business decisions as a leader.", + "text": "What is 212 degrees Fahrenheit converted to Celsius?", "tokens": [ { "generatedToken": { - "token": "\u2581Write", - "logprob": -10.650314331054688, - "raw_logprob": -10.650314331054688, - }, - "topTokens": None, - "textRange": {"start": 0, "end": 5}, - }, - { - "generatedToken": { - "token": "\u2581me", - "logprob": -5.457987308502197, - "raw_logprob": -5.457987308502197, + "token": "\u2581What\u2581is", + "logprob": -7.446773529052734, + "raw_logprob": -7.446773529052734, }, "topTokens": None, - "textRange": {"start": 5, "end": 8}, + "textRange": {"start": 0, "end": 7}, }, { "generatedToken": { - "token": "\u2581a\u2581blog", - "logprob": -8.36896800994873, - "raw_logprob": -8.36896800994873, + "token": "\u2581", + "logprob": -3.8046724796295166, + "raw_logprob": -3.8046724796295166, }, "topTokens": None, - "textRange": {"start": 8, "end": 15}, + "textRange": {"start": 7, "end": 8}, }, { "generatedToken": { - "token": "\u2581about\u2581making", - "logprob": -14.223419189453125, - "raw_logprob": -14.223419189453125, + "token": "212", 
+ "logprob": -9.287349700927734, + "raw_logprob": -9.287349700927734, }, "topTokens": None, - "textRange": {"start": 15, "end": 28}, + "textRange": {"start": 8, "end": 11}, }, { "generatedToken": { - "token": "\u2581strong", - "logprob": -9.367725372314453, - "raw_logprob": -9.367725372314453, + "token": "\u2581degrees\u2581Fahrenheit", + "logprob": -7.953181743621826, + "raw_logprob": -7.953181743621826, }, "topTokens": None, - "textRange": {"start": 28, "end": 35}, + "textRange": {"start": 11, "end": 30}, }, { "generatedToken": { - "token": "\u2581business\u2581decisions", - "logprob": -7.66295862197876, - "raw_logprob": -7.66295862197876, + "token": "\u2581converted\u2581to", + "logprob": -6.168096542358398, + "raw_logprob": -6.168096542358398, }, "topTokens": None, - "textRange": {"start": 35, "end": 54}, + "textRange": {"start": 30, "end": 43}, }, { "generatedToken": { - "token": "\u2581as\u2581a\u2581leader", - "logprob": -13.765915870666504, - "raw_logprob": -13.765915870666504, + "token": "\u2581Celsius", + "logprob": -0.09790332615375519, + "raw_logprob": -0.09790332615375519, }, "topTokens": None, - "textRange": {"start": 54, "end": 66}, + "textRange": {"start": 43, "end": 51}, }, { "generatedToken": { - "token": ".", - "logprob": -10.953210830688477, - "raw_logprob": -10.953210830688477, + "token": "?", + "logprob": -6.5795369148254395, + "raw_logprob": -6.5795369148254395, }, "topTokens": None, - "textRange": {"start": 66, "end": 67}, + "textRange": {"start": 51, "end": 52}, }, ], }, "completions": [ { "data": { - "text": "\nWhen you are a leader at work, you need to make timely and informed decisions on behalf of your team or company. You have to consider multiple factors and variables, and analyze data in a way to make the best possible choice.\n\nHowever, sometimes things don't turn out the way you intended. Your decision might not work as intended, or act in unforeseen ways. 
Or, you might find new information or context that causes you to question your decision. That's okay.\n\nIt's important to have courage when you're a leader. This means being willing to think critically, reflect, learn from mistakes, and take action steps moving forward.\n\nThere are three steps that can help you grow as a leader and make better business decisions:\n\nStep 1: Gather information\n\nThe first step to making a good decision is to make sure that you have all of the facts. It's important to know what information you need, and from where to get it.\n\nYou can gather information by doing things like reading reports, talking to stakeholders, and conducting research.\n\nStep 2: Analyze information\n\nOnce you've gathered all of your information, you need to take some time to think about it. You need to analyze the data and identify patterns, trends, and trends that might not be immediately obvious.\n\nThere are a few things you should keep in mind when you're analyzing information:\n\n* Identify the key points: What are the key takeaways from this information? 
What", + "text": "\n212 degrees Fahrenheit is equal to 100 degrees Celsius.", "tokens": [ { "generatedToken": { "token": "<|newline|>", - "logprob": -0.00011955977242905647, - "raw_logprob": -0.00011955977242905647, + "logprob": -1.6689286894688848e-06, + "raw_logprob": -0.00015984688070602715, }, "topTokens": None, "textRange": {"start": 0, "end": 1}, }, { "generatedToken": { - "token": "\u2581When\u2581you\u2581are", - "logprob": -6.066172122955322, - "raw_logprob": -6.066172122955322, - }, - "topTokens": None, - "textRange": {"start": 1, "end": 13}, - }, - { - "generatedToken": { - "token": "\u2581a\u2581leader", - "logprob": -0.8404027223587036, - "raw_logprob": -0.8404027223587036, - }, - "topTokens": None, - "textRange": {"start": 13, "end": 22}, - }, - { - "generatedToken": { - "token": "\u2581at\u2581work", - "logprob": -8.004234313964844, - "raw_logprob": -8.004234313964844, - }, - "topTokens": None, - "textRange": {"start": 22, "end": 30}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.07083408534526825, - "raw_logprob": -0.07083408534526825, - }, - "topTokens": None, - "textRange": {"start": 30, "end": 31}, - }, - { - "generatedToken": { - "token": "\u2581you\u2581need\u2581to\u2581make", - "logprob": -2.5708985328674316, - "raw_logprob": -2.5708985328674316, - }, - "topTokens": None, - "textRange": {"start": 31, "end": 48}, - }, - { - "generatedToken": { - "token": "\u2581timely", - "logprob": -9.624330520629883, - "raw_logprob": -9.624330520629883, - }, - "topTokens": None, - "textRange": {"start": 48, "end": 55}, - }, - { - "generatedToken": { - "token": "\u2581and", - "logprob": -1.5508010387420654, - "raw_logprob": -1.5508010387420654, - }, - "topTokens": None, - "textRange": {"start": 55, "end": 59}, - }, - { - "generatedToken": { - "token": "\u2581informed\u2581decisions", - "logprob": -0.5989360809326172, - "raw_logprob": -0.5989360809326172, - }, - "topTokens": None, - "textRange": {"start": 59, "end": 78}, - }, - { - 
"generatedToken": { - "token": "\u2581on\u2581behalf\u2581of", - "logprob": -5.749756336212158, - "raw_logprob": -5.749756336212158, - }, - "topTokens": None, - "textRange": {"start": 78, "end": 91}, - }, - { - "generatedToken": { - "token": "\u2581your\u2581team", - "logprob": -0.29448866844177246, - "raw_logprob": -0.29448866844177246, - }, - "topTokens": None, - "textRange": {"start": 91, "end": 101}, - }, - { - "generatedToken": { - "token": "\u2581or", - "logprob": -2.9078853130340576, - "raw_logprob": -2.9078853130340576, - }, - "topTokens": None, - "textRange": {"start": 101, "end": 104}, - }, - { - "generatedToken": { - "token": "\u2581company", - "logprob": -0.4439607262611389, - "raw_logprob": -0.4439607262611389, - }, - "topTokens": None, - "textRange": {"start": 104, "end": 112}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.004392143338918686, - "raw_logprob": -0.004392143338918686, - }, - "topTokens": None, - "textRange": {"start": 112, "end": 113}, - }, - { - "generatedToken": { - "token": "\u2581You\u2581have", - "logprob": -6.982149600982666, - "raw_logprob": -6.982149600982666, - }, - "topTokens": None, - "textRange": {"start": 113, "end": 122}, - }, - { - "generatedToken": { - "token": "\u2581to\u2581consider", - "logprob": -2.413727283477783, - "raw_logprob": -2.413727283477783, - }, - "topTokens": None, - "textRange": {"start": 122, "end": 134}, - }, - { - "generatedToken": { - "token": "\u2581multiple", - "logprob": -2.61666202545166, - "raw_logprob": -2.61666202545166, - }, - "topTokens": None, - "textRange": {"start": 134, "end": 143}, - }, - { - "generatedToken": { - "token": "\u2581factors", - "logprob": -0.11320021003484726, - "raw_logprob": -0.11320021003484726, - }, - "topTokens": None, - "textRange": {"start": 143, "end": 151}, - }, - { - "generatedToken": { - "token": "\u2581and", - "logprob": -1.4593441486358643, - "raw_logprob": -1.4593441486358643, - }, - "topTokens": None, - "textRange": {"start": 151, "end": 
155}, - }, - { - "generatedToken": { - "token": "\u2581variables", - "logprob": -2.3700382709503174, - "raw_logprob": -2.3700382709503174, - }, - "topTokens": None, - "textRange": {"start": 155, "end": 165}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.9362450838088989, - "raw_logprob": -0.9362450838088989, - }, - "topTokens": None, - "textRange": {"start": 165, "end": 166}, - }, - { - "generatedToken": { - "token": "\u2581and\u2581analyze", - "logprob": -7.707818031311035, - "raw_logprob": -7.707818031311035, - }, - "topTokens": None, - "textRange": {"start": 166, "end": 178}, - }, - { - "generatedToken": { - "token": "\u2581data\u2581in", - "logprob": -7.114713668823242, - "raw_logprob": -7.114713668823242, - }, - "topTokens": None, - "textRange": {"start": 178, "end": 186}, - }, - { - "generatedToken": { - "token": "\u2581a\u2581way\u2581to\u2581make", - "logprob": -2.1352782249450684, - "raw_logprob": -2.1352782249450684, - }, - "topTokens": None, - "textRange": {"start": 186, "end": 200}, - }, - { - "generatedToken": { - "token": "\u2581the\u2581best\u2581possible", - "logprob": -1.202060341835022, - "raw_logprob": -1.202060341835022, - }, - "topTokens": None, - "textRange": {"start": 200, "end": 218}, - }, - { - "generatedToken": { - "token": "\u2581choice", - "logprob": -0.49673229455947876, - "raw_logprob": -0.49673229455947876, - }, - "topTokens": None, - "textRange": {"start": 218, "end": 225}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.08440639078617096, - "raw_logprob": -0.08440639078617096, - }, - "topTokens": None, - "textRange": {"start": 225, "end": 226}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -1.4274420738220215, - "raw_logprob": -1.4274420738220215, - }, - "topTokens": None, - "textRange": {"start": 226, "end": 227}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.002755180699750781, - "raw_logprob": -0.002755180699750781, - }, - "topTokens": None, - 
"textRange": {"start": 227, "end": 228}, - }, - { - "generatedToken": { - "token": "\u2581However", - "logprob": -2.9974615573883057, - "raw_logprob": -2.9974615573883057, - }, - "topTokens": None, - "textRange": {"start": 228, "end": 235}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.0017327546374872327, - "raw_logprob": -0.0017327546374872327, - }, - "topTokens": None, - "textRange": {"start": 235, "end": 236}, - }, - { - "generatedToken": { - "token": "\u2581sometimes", - "logprob": -2.893026113510132, - "raw_logprob": -2.893026113510132, - }, - "topTokens": None, - "textRange": {"start": 236, "end": 246}, - }, - { - "generatedToken": { - "token": "\u2581things", - "logprob": -4.238265037536621, - "raw_logprob": -4.238265037536621, - }, - "topTokens": None, - "textRange": {"start": 246, "end": 253}, - }, - { - "generatedToken": { - "token": "\u2581don't", - "logprob": -2.367069721221924, - "raw_logprob": -2.367069721221924, - }, - "topTokens": None, - "textRange": {"start": 253, "end": 259}, - }, - { - "generatedToken": { - "token": "\u2581turn\u2581out", - "logprob": -1.7048457860946655, - "raw_logprob": -1.7048457860946655, - }, - "topTokens": None, - "textRange": {"start": 259, "end": 268}, - }, - { - "generatedToken": { - "token": "\u2581the\u2581way\u2581you", - "logprob": -2.1934995651245117, - "raw_logprob": -2.1934995651245117, - }, - "topTokens": None, - "textRange": {"start": 268, "end": 280}, - }, - { - "generatedToken": { - "token": "\u2581intended", - "logprob": -3.7538819313049316, - "raw_logprob": -3.7538819313049316, - }, - "topTokens": None, - "textRange": {"start": 280, "end": 289}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.41568616032600403, - "raw_logprob": -0.41568616032600403, - }, - "topTokens": None, - "textRange": {"start": 289, "end": 290}, - }, - { - "generatedToken": { - "token": "\u2581Your", - "logprob": -4.143064498901367, - "raw_logprob": -4.143064498901367, - }, - "topTokens": None, - 
"textRange": {"start": 290, "end": 295}, - }, - { - "generatedToken": { - "token": "\u2581decision", - "logprob": -1.1384129524230957, - "raw_logprob": -1.1384129524230957, - }, - "topTokens": None, - "textRange": {"start": 295, "end": 304}, - }, - { - "generatedToken": { - "token": "\u2581might\u2581not\u2581work", - "logprob": -2.4380242824554443, - "raw_logprob": -2.4380242824554443, + "token": "\u2581", + "logprob": -0.03473362699151039, + "raw_logprob": -0.11261807382106781, }, "topTokens": None, - "textRange": {"start": 304, "end": 319}, + "textRange": {"start": 1, "end": 1}, }, { "generatedToken": { - "token": "\u2581as\u2581intended", - "logprob": -2.9615366458892822, - "raw_logprob": -2.9615366458892822, + "token": "212", + "logprob": -0.003316262038424611, + "raw_logprob": -0.019686665385961533, }, "topTokens": None, - "textRange": {"start": 319, "end": 331}, + "textRange": {"start": 1, "end": 4}, }, { "generatedToken": { - "token": ",", - "logprob": -0.22413745522499084, - "raw_logprob": -0.22413745522499084, + "token": "\u2581degrees\u2581Fahrenheit", + "logprob": -0.003579758107662201, + "raw_logprob": -0.03144374489784241, }, "topTokens": None, - "textRange": {"start": 331, "end": 332}, + "textRange": {"start": 4, "end": 23}, }, { "generatedToken": { - "token": "\u2581or", - "logprob": -0.4422154128551483, - "raw_logprob": -0.4422154128551483, + "token": "\u2581is\u2581equal\u2581to", + "logprob": -0.0027733694296330214, + "raw_logprob": -0.027207009494304657, }, "topTokens": None, - "textRange": {"start": 332, "end": 335}, + "textRange": {"start": 23, "end": 35}, }, { "generatedToken": { - "token": "\u2581act\u2581in", - "logprob": -16.771242141723633, - "raw_logprob": -16.771242141723633, + "token": "\u2581", + "logprob": -0.0003392120997887105, + "raw_logprob": -0.005458095110952854, }, "topTokens": None, - "textRange": {"start": 335, "end": 342}, + "textRange": {"start": 35, "end": 36}, }, { "generatedToken": { - "token": "\u2581unforeseen", - 
"logprob": -2.0343406200408936, - "raw_logprob": -2.0343406200408936, + "token": "100", + "logprob": -2.145764938177308e-06, + "raw_logprob": -0.00012730741582345217, }, "topTokens": None, - "textRange": {"start": 342, "end": 353}, + "textRange": {"start": 36, "end": 39}, }, { "generatedToken": { - "token": "\u2581ways", - "logprob": -0.03732850402593613, - "raw_logprob": -0.03732850402593613, + "token": "\u2581degrees\u2581Celsius", + "logprob": -0.31207239627838135, + "raw_logprob": -0.402545303106308, }, "topTokens": None, - "textRange": {"start": 353, "end": 358}, + "textRange": {"start": 39, "end": 55}, }, { "generatedToken": { "token": ".", - "logprob": -0.07006527483463287, - "raw_logprob": -0.07006527483463287, - }, - "topTokens": None, - "textRange": {"start": 358, "end": 359}, - }, - { - "generatedToken": { - "token": "\u2581Or", - "logprob": -4.574007511138916, - "raw_logprob": -4.574007511138916, - }, - "topTokens": None, - "textRange": {"start": 359, "end": 362}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.35941576957702637, - "raw_logprob": -0.35941576957702637, - }, - "topTokens": None, - "textRange": {"start": 362, "end": 363}, - }, - { - "generatedToken": { - "token": "\u2581you\u2581might\u2581find", - "logprob": -3.0860962867736816, - "raw_logprob": -3.0860962867736816, - }, - "topTokens": None, - "textRange": {"start": 363, "end": 378}, - }, - { - "generatedToken": { - "token": "\u2581new\u2581information", - "logprob": -3.0317506790161133, - "raw_logprob": -3.0317506790161133, - }, - "topTokens": None, - "textRange": {"start": 378, "end": 394}, - }, - { - "generatedToken": { - "token": "\u2581or", - "logprob": -3.251086950302124, - "raw_logprob": -3.251086950302124, - }, - "topTokens": None, - "textRange": {"start": 394, "end": 397}, - }, - { - "generatedToken": { - "token": "\u2581context", - "logprob": -4.189438343048096, - "raw_logprob": -4.189438343048096, + "logprob": -0.023684674873948097, + "raw_logprob": 
-0.0769972875714302, }, "topTokens": None, - "textRange": {"start": 397, "end": 405}, + "textRange": {"start": 55, "end": 56}, }, { "generatedToken": { - "token": "\u2581that\u2581causes", - "logprob": -4.464134216308594, - "raw_logprob": -4.464134216308594, + "token": "<|endoftext|>", + "logprob": -0.0073706600815057755, + "raw_logprob": -0.06265579164028168, }, "topTokens": None, - "textRange": {"start": 405, "end": 417}, - }, - { - "generatedToken": { - "token": "\u2581you", - "logprob": -0.2493533492088318, - "raw_logprob": -0.2493533492088318, - }, - "topTokens": None, - "textRange": {"start": 417, "end": 421}, - }, - { - "generatedToken": { - "token": "\u2581to\u2581question", - "logprob": -2.251695156097412, - "raw_logprob": -2.251695156097412, - }, - "topTokens": None, - "textRange": {"start": 421, "end": 433}, - }, - { - "generatedToken": { - "token": "\u2581your\u2581decision", - "logprob": -1.989322543144226, - "raw_logprob": -1.989322543144226, - }, - "topTokens": None, - "textRange": {"start": 433, "end": 447}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.17142613232135773, - "raw_logprob": -0.17142613232135773, - }, - "topTokens": None, - "textRange": {"start": 447, "end": 448}, - }, - { - "generatedToken": { - "token": "\u2581That's", - "logprob": -5.326101303100586, - "raw_logprob": -5.326101303100586, - }, - "topTokens": None, - "textRange": {"start": 448, "end": 455}, - }, - { - "generatedToken": { - "token": "\u2581okay", - "logprob": -0.7236325740814209, - "raw_logprob": -0.7236325740814209, - }, - "topTokens": None, - "textRange": {"start": 455, "end": 460}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -1.1485638618469238, - "raw_logprob": -1.1485638618469238, - }, - "topTokens": None, - "textRange": {"start": 460, "end": 461}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -1.3378857374191284, - "raw_logprob": -1.3378857374191284, - }, - "topTokens": None, - "textRange": {"start": 461, 
"end": 462}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.00016985881666187197, - "raw_logprob": -0.00016985881666187197, - }, - "topTokens": None, - "textRange": {"start": 462, "end": 463}, - }, - { - "generatedToken": { - "token": "\u2581It's\u2581important\u2581to", - "logprob": -3.5227854251861572, - "raw_logprob": -3.5227854251861572, - }, - "topTokens": None, - "textRange": {"start": 463, "end": 480}, - }, - { - "generatedToken": { - "token": "\u2581have", - "logprob": -2.9167816638946533, - "raw_logprob": -2.9167816638946533, - }, - "topTokens": None, - "textRange": {"start": 480, "end": 485}, - }, - { - "generatedToken": { - "token": "\u2581courage", - "logprob": -5.581697940826416, - "raw_logprob": -5.581697940826416, - }, - "topTokens": None, - "textRange": {"start": 485, "end": 493}, - }, - { - "generatedToken": { - "token": "\u2581when\u2581you're", - "logprob": -4.5586161613464355, - "raw_logprob": -4.5586161613464355, - }, - "topTokens": None, - "textRange": {"start": 493, "end": 505}, - }, - { - "generatedToken": { - "token": "\u2581a\u2581leader", - "logprob": -0.26272106170654297, - "raw_logprob": -0.26272106170654297, - }, - "topTokens": None, - "textRange": {"start": 505, "end": 514}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.3965468406677246, - "raw_logprob": -0.3965468406677246, - }, - "topTokens": None, - "textRange": {"start": 514, "end": 515}, - }, - { - "generatedToken": { - "token": "\u2581This\u2581means", - "logprob": -2.841196298599243, - "raw_logprob": -2.841196298599243, - }, - "topTokens": None, - "textRange": {"start": 515, "end": 526}, - }, - { - "generatedToken": { - "token": "\u2581being", - "logprob": -0.4315812587738037, - "raw_logprob": -0.4315812587738037, - }, - "topTokens": None, - "textRange": {"start": 526, "end": 532}, - }, - { - "generatedToken": { - "token": "\u2581willing\u2581to", - "logprob": -0.03861286863684654, - "raw_logprob": -0.03861286863684654, - }, - 
"topTokens": None, - "textRange": {"start": 532, "end": 543}, - }, - { - "generatedToken": { - "token": "\u2581think", - "logprob": -7.899557113647461, - "raw_logprob": -7.899557113647461, - }, - "topTokens": None, - "textRange": {"start": 543, "end": 549}, - }, - { - "generatedToken": { - "token": "\u2581critically", - "logprob": -0.6595878601074219, - "raw_logprob": -0.6595878601074219, - }, - "topTokens": None, - "textRange": {"start": 549, "end": 560}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -1.2396876811981201, - "raw_logprob": -1.2396876811981201, - }, - "topTokens": None, - "textRange": {"start": 560, "end": 561}, - }, - { - "generatedToken": { - "token": "\u2581reflect", - "logprob": -6.496954917907715, - "raw_logprob": -6.496954917907715, - }, - "topTokens": None, - "textRange": {"start": 561, "end": 569}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.3813382685184479, - "raw_logprob": -0.3813382685184479, - }, - "topTokens": None, - "textRange": {"start": 569, "end": 570}, - }, - { - "generatedToken": { - "token": "\u2581learn\u2581from", - "logprob": -5.863975524902344, - "raw_logprob": -5.863975524902344, - }, - "topTokens": None, - "textRange": {"start": 570, "end": 581}, - }, - { - "generatedToken": { - "token": "\u2581mistakes", - "logprob": -1.1053953170776367, - "raw_logprob": -1.1053953170776367, - }, - "topTokens": None, - "textRange": {"start": 581, "end": 590}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.010977472178637981, - "raw_logprob": -0.010977472178637981, - }, - "topTokens": None, - "textRange": {"start": 590, "end": 591}, - }, - { - "generatedToken": { - "token": "\u2581and", - "logprob": -0.5951434373855591, - "raw_logprob": -0.5951434373855591, - }, - "topTokens": None, - "textRange": {"start": 591, "end": 595}, - }, - { - "generatedToken": { - "token": "\u2581take\u2581action", - "logprob": -4.118521690368652, - "raw_logprob": -4.118521690368652, - }, - "topTokens": None, - 
"textRange": {"start": 595, "end": 607}, - }, - { - "generatedToken": { - "token": "\u2581steps", - "logprob": -8.071130752563477, - "raw_logprob": -8.071130752563477, - }, - "topTokens": None, - "textRange": {"start": 607, "end": 613}, - }, - { - "generatedToken": { - "token": "\u2581moving\u2581forward", - "logprob": -5.662147045135498, - "raw_logprob": -5.662147045135498, - }, - "topTokens": None, - "textRange": {"start": 613, "end": 628}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.03737432509660721, - "raw_logprob": -0.03737432509660721, - }, - "topTokens": None, - "textRange": {"start": 628, "end": 629}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -1.0259989500045776, - "raw_logprob": -1.0259989500045776, - }, - "topTokens": None, - "textRange": {"start": 629, "end": 630}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.0006999903125688434, - "raw_logprob": -0.0006999903125688434, - }, - "topTokens": None, - "textRange": {"start": 630, "end": 631}, - }, - { - "generatedToken": { - "token": "\u2581There\u2581are\u2581three", - "logprob": -5.931296348571777, - "raw_logprob": -5.931296348571777, - }, - "topTokens": None, - "textRange": {"start": 631, "end": 646}, - }, - { - "generatedToken": { - "token": "\u2581steps", - "logprob": -2.7536213397979736, - "raw_logprob": -2.7536213397979736, - }, - "topTokens": None, - "textRange": {"start": 646, "end": 652}, - }, - { - "generatedToken": { - "token": "\u2581that\u2581can\u2581help\u2581you", - "logprob": -2.3474459648132324, - "raw_logprob": -2.3474459648132324, - }, - "topTokens": None, - "textRange": {"start": 652, "end": 670}, - }, - { - "generatedToken": { - "token": "\u2581grow", - "logprob": -7.027171611785889, - "raw_logprob": -7.027171611785889, - }, - "topTokens": None, - "textRange": {"start": 670, "end": 675}, - }, - { - "generatedToken": { - "token": "\u2581as\u2581a\u2581leader", - "logprob": -0.40542012453079224, - "raw_logprob": 
-0.40542012453079224, - }, - "topTokens": None, - "textRange": {"start": 675, "end": 687}, - }, - { - "generatedToken": { - "token": "\u2581and\u2581make", - "logprob": -0.7026352882385254, - "raw_logprob": -0.7026352882385254, - }, - "topTokens": None, - "textRange": {"start": 687, "end": 696}, - }, - { - "generatedToken": { - "token": "\u2581better", - "logprob": -2.1509532928466797, - "raw_logprob": -2.1509532928466797, - }, - "topTokens": None, - "textRange": {"start": 696, "end": 703}, - }, - { - "generatedToken": { - "token": "\u2581business\u2581decisions", - "logprob": -0.24822193384170532, - "raw_logprob": -0.24822193384170532, - }, - "topTokens": None, - "textRange": {"start": 703, "end": 722}, - }, - { - "generatedToken": { - "token": ":", - "logprob": -0.46704334020614624, - "raw_logprob": -0.46704334020614624, - }, - "topTokens": None, - "textRange": {"start": 722, "end": 723}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.02775048278272152, - "raw_logprob": -0.02775048278272152, - }, - "topTokens": None, - "textRange": {"start": 723, "end": 724}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.0020361661445349455, - "raw_logprob": -0.0020361661445349455, - }, - "topTokens": None, - "textRange": {"start": 724, "end": 725}, - }, - { - "generatedToken": { - "token": "\u2581Step", - "logprob": -1.442288875579834, - "raw_logprob": -1.442288875579834, - }, - "topTokens": None, - "textRange": {"start": 725, "end": 729}, - }, - { - "generatedToken": { - "token": "\u2581", - "logprob": -0.05165497958660126, - "raw_logprob": -0.05165497958660126, - }, - "topTokens": None, - "textRange": {"start": 729, "end": 730}, - }, - { - "generatedToken": { - "token": "1", - "logprob": -4.792098479811102e-05, - "raw_logprob": -4.792098479811102e-05, - }, - "topTokens": None, - "textRange": {"start": 730, "end": 731}, - }, - { - "generatedToken": { - "token": ":", - "logprob": -0.02608294039964676, - "raw_logprob": 
-0.02608294039964676, - }, - "topTokens": None, - "textRange": {"start": 731, "end": 732}, - }, - { - "generatedToken": { - "token": "\u2581Gather", - "logprob": -3.0909531116485596, - "raw_logprob": -3.0909531116485596, - }, - "topTokens": None, - "textRange": {"start": 732, "end": 739}, - }, - { - "generatedToken": { - "token": "\u2581information", - "logprob": -0.8507784605026245, - "raw_logprob": -0.8507784605026245, - }, - "topTokens": None, - "textRange": {"start": 739, "end": 751}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.6048281788825989, - "raw_logprob": -0.6048281788825989, - }, - "topTokens": None, - "textRange": {"start": 751, "end": 752}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.01351175270974636, - "raw_logprob": -0.01351175270974636, - }, - "topTokens": None, - "textRange": {"start": 752, "end": 753}, - }, - { - "generatedToken": { - "token": "\u2581The\u2581first\u2581step", - "logprob": -2.7363672256469727, - "raw_logprob": -2.7363672256469727, - }, - "topTokens": None, - "textRange": {"start": 753, "end": 767}, - }, - { - "generatedToken": { - "token": "\u2581to", - "logprob": -0.1339748501777649, - "raw_logprob": -0.1339748501777649, - }, - "topTokens": None, - "textRange": {"start": 767, "end": 770}, - }, - { - "generatedToken": { - "token": "\u2581making", - "logprob": -0.3207220137119293, - "raw_logprob": -0.3207220137119293, - }, - "topTokens": None, - "textRange": {"start": 770, "end": 777}, - }, - { - "generatedToken": { - "token": "\u2581a\u2581good", - "logprob": -0.6057114005088806, - "raw_logprob": -0.6057114005088806, - }, - "topTokens": None, - "textRange": {"start": 777, "end": 784}, - }, - { - "generatedToken": { - "token": "\u2581decision", - "logprob": -0.030523210763931274, - "raw_logprob": -0.030523210763931274, - }, - "topTokens": None, - "textRange": {"start": 784, "end": 793}, - }, - { - "generatedToken": { - "token": "\u2581is\u2581to", - "logprob": 
-3.0425467491149902, - "raw_logprob": -3.0425467491149902, - }, - "topTokens": None, - "textRange": {"start": 793, "end": 799}, - }, - { - "generatedToken": { - "token": "\u2581make\u2581sure\u2581that\u2581you\u2581have", - "logprob": -0.7047816514968872, - "raw_logprob": -0.7047816514968872, - }, - "topTokens": None, - "textRange": {"start": 799, "end": 823}, - }, - { - "generatedToken": { - "token": "\u2581all\u2581of\u2581the", - "logprob": -1.9955559968948364, - "raw_logprob": -1.9955559968948364, - }, - "topTokens": None, - "textRange": {"start": 823, "end": 834}, - }, - { - "generatedToken": { - "token": "\u2581facts", - "logprob": -2.409013271331787, - "raw_logprob": -2.409013271331787, - }, - "topTokens": None, - "textRange": {"start": 834, "end": 840}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.2631763517856598, - "raw_logprob": -0.2631763517856598, - }, - "topTokens": None, - "textRange": {"start": 840, "end": 841}, - }, - { - "generatedToken": { - "token": "\u2581It's\u2581important\u2581to", - "logprob": -4.5646491050720215, - "raw_logprob": -4.5646491050720215, - }, - "topTokens": None, - "textRange": {"start": 841, "end": 859}, - }, - { - "generatedToken": { - "token": "\u2581know\u2581what", - "logprob": -6.077958106994629, - "raw_logprob": -6.077958106994629, - }, - "topTokens": None, - "textRange": {"start": 859, "end": 869}, - }, - { - "generatedToken": { - "token": "\u2581information", - "logprob": -2.0120184421539307, - "raw_logprob": -2.0120184421539307, - }, - "topTokens": None, - "textRange": {"start": 869, "end": 881}, - }, - { - "generatedToken": { - "token": "\u2581you\u2581need", - "logprob": -1.7770088911056519, - "raw_logprob": -1.7770088911056519, - }, - "topTokens": None, - "textRange": {"start": 881, "end": 890}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.4962013363838196, - "raw_logprob": -0.4962013363838196, - }, - "topTokens": None, - "textRange": {"start": 890, "end": 891}, - }, - { - 
"generatedToken": { - "token": "\u2581and", - "logprob": -0.8423260450363159, - "raw_logprob": -0.8423260450363159, - }, - "topTokens": None, - "textRange": {"start": 891, "end": 895}, - }, - { - "generatedToken": { - "token": "\u2581from\u2581where", - "logprob": -8.261597633361816, - "raw_logprob": -8.261597633361816, - }, - "topTokens": None, - "textRange": {"start": 895, "end": 906}, - }, - { - "generatedToken": { - "token": "\u2581to\u2581get\u2581it", - "logprob": -0.985969066619873, - "raw_logprob": -0.985969066619873, - }, - "topTokens": None, - "textRange": {"start": 906, "end": 916}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.0048598977737128735, - "raw_logprob": -0.0048598977737128735, - }, - "topTokens": None, - "textRange": {"start": 916, "end": 917}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.5743589401245117, - "raw_logprob": -0.5743589401245117, - }, - "topTokens": None, - "textRange": {"start": 917, "end": 918}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.000593962671700865, - "raw_logprob": -0.000593962671700865, - }, - "topTokens": None, - "textRange": {"start": 918, "end": 919}, - }, - { - "generatedToken": { - "token": "\u2581You\u2581can", - "logprob": -4.267513275146484, - "raw_logprob": -4.267513275146484, - }, - "topTokens": None, - "textRange": {"start": 919, "end": 926}, - }, - { - "generatedToken": { - "token": "\u2581gather", - "logprob": -0.007923126220703125, - "raw_logprob": -0.007923126220703125, - }, - "topTokens": None, - "textRange": {"start": 926, "end": 933}, - }, - { - "generatedToken": { - "token": "\u2581information", - "logprob": -0.3179577887058258, - "raw_logprob": -0.3179577887058258, - }, - "topTokens": None, - "textRange": {"start": 933, "end": 945}, - }, - { - "generatedToken": { - "token": "\u2581by\u2581doing", - "logprob": -5.132864952087402, - "raw_logprob": -5.132864952087402, - }, - "topTokens": None, - "textRange": {"start": 945, 
"end": 954}, - }, - { - "generatedToken": { - "token": "\u2581things\u2581like", - "logprob": -2.202630043029785, - "raw_logprob": -2.202630043029785, - }, - "topTokens": None, - "textRange": {"start": 954, "end": 966}, - }, - { - "generatedToken": { - "token": "\u2581reading", - "logprob": -3.232940196990967, - "raw_logprob": -3.232940196990967, - }, - "topTokens": None, - "textRange": {"start": 966, "end": 974}, - }, - { - "generatedToken": { - "token": "\u2581reports", - "logprob": -0.329463928937912, - "raw_logprob": -0.329463928937912, - }, - "topTokens": None, - "textRange": {"start": 974, "end": 982}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.002441998338326812, - "raw_logprob": -0.002441998338326812, - }, - "topTokens": None, - "textRange": {"start": 982, "end": 983}, - }, - { - "generatedToken": { - "token": "\u2581talking\u2581to", - "logprob": -0.12298407405614853, - "raw_logprob": -0.12298407405614853, - }, - "topTokens": None, - "textRange": {"start": 983, "end": 994}, - }, - { - "generatedToken": { - "token": "\u2581stakeholders", - "logprob": -2.3864426612854004, - "raw_logprob": -2.3864426612854004, - }, - "topTokens": None, - "textRange": {"start": 994, "end": 1007}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.012393603101372719, - "raw_logprob": -0.012393603101372719, - }, - "topTokens": None, - "textRange": {"start": 1007, "end": 1008}, - }, - { - "generatedToken": { - "token": "\u2581and", - "logprob": -0.1544899344444275, - "raw_logprob": -0.1544899344444275, - }, - "topTokens": None, - "textRange": {"start": 1008, "end": 1012}, - }, - { - "generatedToken": { - "token": "\u2581conducting\u2581research", - "logprob": -0.731350839138031, - "raw_logprob": -0.731350839138031, - }, - "topTokens": None, - "textRange": {"start": 1012, "end": 1032}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.010276736691594124, - "raw_logprob": -0.010276736691594124, - }, - "topTokens": None, - "textRange": 
{"start": 1032, "end": 1033}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -1.247491478919983, - "raw_logprob": -1.247491478919983, - }, - "topTokens": None, - "textRange": {"start": 1033, "end": 1034}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -5.7338023907504976e-05, - "raw_logprob": -5.7338023907504976e-05, - }, - "topTokens": None, - "textRange": {"start": 1034, "end": 1035}, - }, - { - "generatedToken": { - "token": "\u2581Step", - "logprob": -0.43501779437065125, - "raw_logprob": -0.43501779437065125, - }, - "topTokens": None, - "textRange": {"start": 1035, "end": 1039}, - }, - { - "generatedToken": { - "token": "\u2581", - "logprob": -1.1920858014491387e-05, - "raw_logprob": -1.1920858014491387e-05, - }, - "topTokens": None, - "textRange": {"start": 1039, "end": 1040}, - }, - { - "generatedToken": { - "token": "2", - "logprob": -0.00016342257731594145, - "raw_logprob": -0.00016342257731594145, - }, - "topTokens": None, - "textRange": {"start": 1040, "end": 1041}, - }, - { - "generatedToken": { - "token": ":", - "logprob": -0.00010644822759786621, - "raw_logprob": -0.00010644822759786621, - }, - "topTokens": None, - "textRange": {"start": 1041, "end": 1042}, - }, - { - "generatedToken": { - "token": "\u2581Analyze", - "logprob": -0.15760670602321625, - "raw_logprob": -0.15760670602321625, - }, - "topTokens": None, - "textRange": {"start": 1042, "end": 1050}, - }, - { - "generatedToken": { - "token": "\u2581information", - "logprob": -1.612084984779358, - "raw_logprob": -1.612084984779358, - }, - "topTokens": None, - "textRange": {"start": 1050, "end": 1062}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -9.583967766957358e-05, - "raw_logprob": -9.583967766957358e-05, - }, - "topTokens": None, - "textRange": {"start": 1062, "end": 1063}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.00024685196694917977, - "raw_logprob": -0.00024685196694917977, - }, - 
"topTokens": None, - "textRange": {"start": 1063, "end": 1064}, - }, - { - "generatedToken": { - "token": "\u2581Once\u2581you've", - "logprob": -2.3116512298583984, - "raw_logprob": -2.3116512298583984, - }, - "topTokens": None, - "textRange": {"start": 1064, "end": 1075}, - }, - { - "generatedToken": { - "token": "\u2581gathered", - "logprob": -0.002062814310193062, - "raw_logprob": -0.002062814310193062, - }, - "topTokens": None, - "textRange": {"start": 1075, "end": 1084}, - }, - { - "generatedToken": { - "token": "\u2581all\u2581of\u2581your", - "logprob": -2.685849666595459, - "raw_logprob": -2.685849666595459, - }, - "topTokens": None, - "textRange": {"start": 1084, "end": 1096}, - }, - { - "generatedToken": { - "token": "\u2581information", - "logprob": -0.003219066886231303, - "raw_logprob": -0.003219066886231303, - }, - "topTokens": None, - "textRange": {"start": 1096, "end": 1108}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -3.361645576660521e-05, - "raw_logprob": -3.361645576660521e-05, - }, - "topTokens": None, - "textRange": {"start": 1108, "end": 1109}, - }, - { - "generatedToken": { - "token": "\u2581you\u2581need\u2581to", - "logprob": -1.4020256996154785, - "raw_logprob": -1.4020256996154785, - }, - "topTokens": None, - "textRange": {"start": 1109, "end": 1121}, - }, - { - "generatedToken": { - "token": "\u2581take\u2581some\u2581time\u2581to", - "logprob": -2.1766977310180664, - "raw_logprob": -2.1766977310180664, - }, - "topTokens": None, - "textRange": {"start": 1121, "end": 1139}, - }, - { - "generatedToken": { - "token": "\u2581think\u2581about\u2581it", - "logprob": -0.4216986298561096, - "raw_logprob": -0.4216986298561096, - }, - "topTokens": None, - "textRange": {"start": 1139, "end": 1154}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.24139046669006348, - "raw_logprob": -0.24139046669006348, - }, - "topTokens": None, - "textRange": {"start": 1154, "end": 1155}, - }, - { - "generatedToken": { - "token": 
"\u2581You\u2581need\u2581to", - "logprob": -1.129857063293457, - "raw_logprob": -1.129857063293457, - }, - "topTokens": None, - "textRange": {"start": 1155, "end": 1167}, - }, - { - "generatedToken": { - "token": "\u2581analyze\u2581the", - "logprob": -1.3527189493179321, - "raw_logprob": -1.3527189493179321, - }, - "topTokens": None, - "textRange": {"start": 1167, "end": 1179}, - }, - { - "generatedToken": { - "token": "\u2581data", - "logprob": -1.0173096656799316, - "raw_logprob": -1.0173096656799316, - }, - "topTokens": None, - "textRange": {"start": 1179, "end": 1184}, - }, - { - "generatedToken": { - "token": "\u2581and\u2581identify", - "logprob": -3.182776927947998, - "raw_logprob": -3.182776927947998, - }, - "topTokens": None, - "textRange": {"start": 1184, "end": 1197}, - }, - { - "generatedToken": { - "token": "\u2581patterns", - "logprob": -0.6117339134216309, - "raw_logprob": -0.6117339134216309, - }, - "topTokens": None, - "textRange": {"start": 1197, "end": 1206}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -0.4564504325389862, - "raw_logprob": -0.4564504325389862, - }, - "topTokens": None, - "textRange": {"start": 1206, "end": 1207}, - }, - { - "generatedToken": { - "token": "\u2581trends", - "logprob": -0.0026252351235598326, - "raw_logprob": -0.0026252351235598326, - }, - "topTokens": None, - "textRange": {"start": 1207, "end": 1214}, - }, - { - "generatedToken": { - "token": ",", - "logprob": -2.706014311115723e-05, - "raw_logprob": -2.706014311115723e-05, - }, - "topTokens": None, - "textRange": {"start": 1214, "end": 1215}, - }, - { - "generatedToken": { - "token": "\u2581and", - "logprob": -0.16668428480625153, - "raw_logprob": -0.16668428480625153, - }, - "topTokens": None, - "textRange": {"start": 1215, "end": 1219}, - }, - { - "generatedToken": { - "token": "\u2581trends", - "logprob": -2.091916084289551, - "raw_logprob": -2.091916084289551, - }, - "topTokens": None, - "textRange": {"start": 1219, "end": 1226}, - }, - { - 
"generatedToken": { - "token": "\u2581that", - "logprob": -2.99127197265625, - "raw_logprob": -2.99127197265625, - }, - "topTokens": None, - "textRange": {"start": 1226, "end": 1231}, - }, - { - "generatedToken": { - "token": "\u2581might\u2581not\u2581be", - "logprob": -2.1681160926818848, - "raw_logprob": -2.1681160926818848, - }, - "topTokens": None, - "textRange": {"start": 1231, "end": 1244}, - }, - { - "generatedToken": { - "token": "\u2581immediately", - "logprob": -0.5720977783203125, - "raw_logprob": -0.5720977783203125, - }, - "topTokens": None, - "textRange": {"start": 1244, "end": 1256}, - }, - { - "generatedToken": { - "token": "\u2581obvious", - "logprob": -0.38135844469070435, - "raw_logprob": -0.38135844469070435, - }, - "topTokens": None, - "textRange": {"start": 1256, "end": 1264}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.0025424794293940067, - "raw_logprob": -0.0025424794293940067, - }, - "topTokens": None, - "textRange": {"start": 1264, "end": 1265}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.005445053335279226, - "raw_logprob": -0.005445053335279226, - }, - "topTokens": None, - "textRange": {"start": 1265, "end": 1266}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -1.156323378381785e-05, - "raw_logprob": -1.156323378381785e-05, - }, - "topTokens": None, - "textRange": {"start": 1266, "end": 1267}, - }, - { - "generatedToken": { - "token": "\u2581There\u2581are\u2581a\u2581few", - "logprob": -4.2585649490356445, - "raw_logprob": -4.2585649490356445, - }, - "topTokens": None, - "textRange": {"start": 1267, "end": 1282}, - }, - { - "generatedToken": { - "token": "\u2581things", - "logprob": -2.04957914352417, - "raw_logprob": -2.04957914352417, - }, - "topTokens": None, - "textRange": {"start": 1282, "end": 1289}, - }, - { - "generatedToken": { - "token": "\u2581you\u2581should", - "logprob": -1.8114514350891113, - "raw_logprob": -1.8114514350891113, - }, - "topTokens": 
None, - "textRange": {"start": 1289, "end": 1300}, - }, - { - "generatedToken": { - "token": "\u2581keep\u2581in\u2581mind", - "logprob": -0.2850663959980011, - "raw_logprob": -0.2850663959980011, - }, - "topTokens": None, - "textRange": {"start": 1300, "end": 1313}, - }, - { - "generatedToken": { - "token": "\u2581when\u2581you're", - "logprob": -0.40983426570892334, - "raw_logprob": -0.40983426570892334, - }, - "topTokens": None, - "textRange": {"start": 1313, "end": 1325}, - }, - { - "generatedToken": { - "token": "\u2581analyzing", - "logprob": -0.049553561955690384, - "raw_logprob": -0.049553561955690384, - }, - "topTokens": None, - "textRange": {"start": 1325, "end": 1335}, - }, - { - "generatedToken": { - "token": "\u2581information", - "logprob": -0.0341101810336113, - "raw_logprob": -0.0341101810336113, - }, - "topTokens": None, - "textRange": {"start": 1335, "end": 1347}, - }, - { - "generatedToken": { - "token": ":", - "logprob": -0.4348779022693634, - "raw_logprob": -0.4348779022693634, - }, - "topTokens": None, - "textRange": {"start": 1347, "end": 1348}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.006193492095917463, - "raw_logprob": -0.006193492095917463, - }, - "topTokens": None, - "textRange": {"start": 1348, "end": 1349}, - }, - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -0.000358159770257771, - "raw_logprob": -0.000358159770257771, - }, - "topTokens": None, - "textRange": {"start": 1349, "end": 1350}, - }, - { - "generatedToken": { - "token": "\u2581*", - "logprob": -1.0053796768188477, - "raw_logprob": -1.0053796768188477, - }, - "topTokens": None, - "textRange": {"start": 1350, "end": 1351}, - }, - { - "generatedToken": { - "token": "\u2581Identify", - "logprob": -4.100193977355957, - "raw_logprob": -4.100193977355957, - }, - "topTokens": None, - "textRange": {"start": 1351, "end": 1360}, - }, - { - "generatedToken": { - "token": "\u2581the", - "logprob": -1.141700029373169, - "raw_logprob": 
-1.141700029373169, - }, - "topTokens": None, - "textRange": {"start": 1360, "end": 1364}, - }, - { - "generatedToken": { - "token": "\u2581key\u2581points", - "logprob": -0.9346644282341003, - "raw_logprob": -0.9346644282341003, - }, - "topTokens": None, - "textRange": {"start": 1364, "end": 1375}, - }, - { - "generatedToken": { - "token": ":", - "logprob": -0.29478567838668823, - "raw_logprob": -0.29478567838668823, - }, - "topTokens": None, - "textRange": {"start": 1375, "end": 1376}, - }, - { - "generatedToken": { - "token": "\u2581What\u2581are\u2581the", - "logprob": -0.2456199824810028, - "raw_logprob": -0.2456199824810028, - }, - "topTokens": None, - "textRange": {"start": 1376, "end": 1389}, - }, - { - "generatedToken": { - "token": "\u2581key", - "logprob": -0.8171483278274536, - "raw_logprob": -0.8171483278274536, - }, - "topTokens": None, - "textRange": {"start": 1389, "end": 1393}, - }, - { - "generatedToken": { - "token": "\u2581takeaways", - "logprob": -0.5598645806312561, - "raw_logprob": -0.5598645806312561, - }, - "topTokens": None, - "textRange": {"start": 1393, "end": 1403}, - }, - { - "generatedToken": { - "token": "\u2581from", - "logprob": -1.6096564531326294, - "raw_logprob": -1.6096564531326294, - }, - "topTokens": None, - "textRange": {"start": 1403, "end": 1408}, - }, - { - "generatedToken": { - "token": "\u2581this\u2581information", - "logprob": -1.101968765258789, - "raw_logprob": -1.101968765258789, - }, - "topTokens": None, - "textRange": {"start": 1408, "end": 1425}, - }, - { - "generatedToken": { - "token": "?", - "logprob": -0.0003685271949507296, - "raw_logprob": -0.0003685271949507296, - }, - "topTokens": None, - "textRange": {"start": 1425, "end": 1426}, - }, - { - "generatedToken": { - "token": "\u2581What", - "logprob": -2.42529034614563, - "raw_logprob": -2.42529034614563, - }, - "topTokens": None, - "textRange": {"start": 1426, "end": 1431}, + "textRange": {"start": 56, "end": 56}, }, ], }, - "finishReason": {"reason": 
"length", "length": 200}, - } - ], - }, - ], - "cohere.command-text-v14::Write me a blog about making strong business decisions as a leader.": [ - {}, - { - "generations": [ - { - "id": "7449e005-a317-42ab-8e47-6bf0fa119088", - "text": " As a leader, one of the most important things you can do is make strong business decisions. Your choices can make or break your company, so it's essential to take the time to think things through and consider all your options. Here are a few tips for making sound business decisions:\n\n1. Do your research. Before making any decision, it's important to gather as much information as possible. This means talking to your team, looking at data and trends, and considering all of your options. The more information you have, the better equipped you'll be to make a decision.\n\n2. Consider the consequences. Every decision has consequences, so it's important to think about what might happen as a result of your choice. What will the impact be on your team, your company, and your customers? It's also important to think about how your decision might affect your own career and personal life.\n\n3. Seek advice. 
If you're struggling to make a decision, it", + "finishReason": {"reason": "endoftext"}, } ], - "id": "4e3ebf15-98d2-4aaf-a2da-61d0e262e862", - "prompt": "Write me a blog about making strong business decisions as a leader.", }, ], } diff --git a/tests/mlmodel_bedrock/_test_chat_completion.py b/tests/mlmodel_bedrock/_test_chat_completion.py new file mode 100644 index 0000000000..9ccbe5b644 --- /dev/null +++ b/tests/mlmodel_bedrock/_test_chat_completion.py @@ -0,0 +1,135 @@ +chat_completion_payload_templates = { + "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', + "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', +} + +chat_completion_expected_events = { + "amazon.titan-text-express-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "transaction_id": None, + "span_id": "span-id", + "trace_id": "trace-id", + "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", + "api_key_last_four_digits": "CRET", + "duration": None, # Response time varies each test run + "request.model": "amazon.titan-text-express-v1", + "response.model": "amazon.titan-text-express-v1", + "response.usage.completion_tokens": 55, + "response.usage.total_tokens": 67, + "response.usage.prompt_tokens": 12, + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "FINISH", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": "What is 212 degrees Fahrenheit 
converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": "\nUse the formula,\n°C = (°F - 32) x 5/9\n= 212 x 5/9\n= 100 degrees Celsius\n212 degrees Fahrenheit is 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "amazon.titan-text-express-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "ai21.j2-mid-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "transaction_id": None, + "span_id": "span-id", + "trace_id": "trace-id", + "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", + "response_id": "1234", + "api_key_last_four_digits": "CRET", + "duration": None, # Response time varies each test run + "request.model": "ai21.j2-mid-v1", + "response.model": "ai21.j2-mid-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "endoftext", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "1234-0", + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + 
"response.model": "ai21.j2-mid-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": "1234-1", + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": "\n212 degrees Fahrenheit is equal to 100 degrees Celsius.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "ai21.j2-mid-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], +} diff --git a/tests/mlmodel_bedrock/conftest.py b/tests/mlmodel_bedrock/conftest.py index 0464c2e079..67ecfc46b9 100644 --- a/tests/mlmodel_bedrock/conftest.py +++ b/tests/mlmodel_bedrock/conftest.py @@ -33,7 +33,7 @@ "transaction_tracer.stack_trace_threshold": 0.0, "debug.log_data_collector_payloads": True, "debug.record_transaction_failure": True, - "ml_insights_event.enabled": True, + "ml_insights_events.enabled": True, } collector_agent_registration = collector_agent_registration_fixture( app_name="Python Agent Test (mlmodel_bedrock)", @@ -93,7 +93,7 @@ def bedrock_server(): # Intercept outgoing requests and log to file for mocking -RECORDED_HEADERS = set(["x-request-id", "contentType"]) +RECORDED_HEADERS = set(["x-amzn-requestid", "content-type"]) def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwargs): @@ -118,11 +118,10 @@ def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwar # Clean up data data = json.loads(streamed_body.decode("utf-8")) - headers = dict(result["ResponseMetadata"].items()) - headers["contentType"] = result["contentType"] + headers = dict(result["ResponseMetadata"]["HTTPHeaders"].items()) headers = dict( filter( - lambda k: k[0].lower() in RECORDED_HEADERS or k[0].lower().startswith("x-ratelimit"), + lambda k: k[0] in RECORDED_HEADERS or 
k[0].startswith("x-ratelimit"), headers.items(), ) ) diff --git a/tests/mlmodel_bedrock/test_chat_completion.py b/tests/mlmodel_bedrock/test_chat_completion.py index ad33001c49..5747762993 100644 --- a/tests/mlmodel_bedrock/test_chat_completion.py +++ b/tests/mlmodel_bedrock/test_chat_completion.py @@ -12,29 +12,132 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import json +from io import BytesIO import pytest +from _test_chat_completion import ( + chat_completion_expected_events, + chat_completion_payload_templates, +) +from testing_support.fixtures import ( + override_application_settings, + reset_core_stats_engine, +) +from testing_support.validators.validate_ml_event_count import validate_ml_event_count +from testing_support.validators.validate_ml_events import validate_ml_events + +from newrelic.api.background_task import background_task +from newrelic.api.time_trace import current_trace +from newrelic.api.transaction import add_custom_attribute, current_transaction + -_test_bedrock_chat_completion_prompt = "Write me a blog about making strong business decisions as a leader." 
+def set_trace_info(): + txn = current_transaction() + if txn: + txn._trace_id = "trace-id" + trace = current_trace() + if trace: + trace.guid = "span-id" -@pytest.mark.parametrize( - "model_id,payload", - [ - ("amazon.titan-text-express-v1", {"inputText": "Command: %s\n\nBlog:"}), - ("anthropic.claude-instant-v1", {"prompt": "Human: %s\n\nAssistant:", "max_tokens_to_sample": 500}), - ("ai21.j2-mid-v1", {"prompt": "%s", "maxTokens": 200}), - ("cohere.command-text-v14", {"prompt": "%s", "max_tokens": 200, "temperature": 0.75}), +@pytest.fixture(scope="session", params=[False, True], ids=["Bytes", "Stream"]) +def is_file_payload(request): + return request.param + + +@pytest.fixture( + scope="session", + params=[ + "amazon.titan-text-express-v1", + "ai21.j2-mid-v1", + # ("anthropic.claude-instant-v1", '{"prompt": "Human: {prompt}\n\nAssistant:", "max_tokens_to_sample": {max_tokens:d}}'), + # ("cohere.command-text-v14", '{"prompt": "{prompt}", "max_tokens": {max_tokens:d}, "temperature": {temperature:f}}'), ], ) -def test_bedrock_chat_completion(bedrock_server, model_id, payload): - body = json.dumps(payload) % _test_bedrock_chat_completion_prompt - response = bedrock_server.invoke_model( - body=body, - modelId=model_id, - accept="application/json", - contentType="application/json", - ) - response_body = json.loads(response.get("body").read()) - assert response_body +def model_id(request): + return request.param + + +@pytest.fixture(scope="session") +def exercise_model(bedrock_server, model_id, is_file_payload): + payload_template = chat_completion_payload_templates[model_id] + + def _exercise_model(prompt, temperature=0.7, max_tokens=100): + body = (payload_template % (prompt, temperature, max_tokens)).encode("utf-8") + if is_file_payload: + body = BytesIO(body) + + response = bedrock_server.invoke_model( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + response_body = json.loads(response.get("body").read()) + 
assert response_body + + return _exercise_model + + +@pytest.fixture(scope="session") +def expected_events(model_id): + return chat_completion_expected_events[model_id] + + +@pytest.fixture(scope="session") +def expected_events_no_convo_id(model_id): + events = copy.deepcopy(chat_completion_expected_events[model_id]) + for event in events: + event[1]["conversation_id"] = "" + return events + + +_test_bedrock_chat_completion_prompt = "What is 212 degrees Fahrenheit converted to Celsius?" + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_in_txn(exercise_model, expected_events): + @validate_ml_events(expected_events) + # One summary event, one user message, and one response message from the assistant + @validate_ml_event_count(count=3) + @background_task() + def _test(): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_in_txn_no_convo_id(exercise_model, expected_events_no_convo_id): + @validate_ml_events(expected_events_no_convo_id) + # One summary event, one user message, and one response message from the assistant + @validate_ml_event_count(count=3) + @background_task() + def _test(): + set_trace_info() + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +def test_bedrock_chat_completion_outside_txn(exercise_model): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + +disabled_ml_settings = {"machine_learning.enabled": False, "ml_insights_events.enabled": False} + + +@override_application_settings(disabled_ml_settings) +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +def 
test_bedrock_chat_completion_disabled_settings(exercise_model): + set_trace_info() + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) diff --git a/tests/testing_support/validators/validate_ml_events.py b/tests/testing_support/validators/validate_ml_events.py index 251e8dbe79..275a9b2e1b 100644 --- a/tests/testing_support/validators/validate_ml_events.py +++ b/tests/testing_support/validators/validate_ml_events.py @@ -24,7 +24,6 @@ def validate_ml_events(events): @function_wrapper def _validate_wrapper(wrapped, instance, args, kwargs): - record_called = [] recorded_events = [] @@ -55,7 +54,7 @@ def _validate_ml_events(wrapped, instance, args, kwargs): for captured in found_events: if _check_event_attributes(expected, captured, mismatches): matching_ml_events += 1 - assert matching_ml_events == 1, _event_details(matching_ml_events, events, mismatches) + assert matching_ml_events == 1, _event_details(matching_ml_events, found_events, mismatches) return val diff --git a/tox.ini b/tox.ini index 8862bcc17c..cc9488b30a 100644 --- a/tox.ini +++ b/tox.ini @@ -49,7 +49,7 @@ envlist = python-adapter_daphne-{py37,py38,py39,py310,py311}-daphnelatest, python-adapter_gevent-{py27,py37,py38,py310,py311}, python-adapter_gunicorn-{py37,py38,py39,py310,py311}-aiohttp3-gunicornlatest, - python-adapter_hypercorn-{py37,py38,py39,py310,py311}-hypercornlatest, + python-adapter_hypercorn-{py38,py39,py310,py311}-hypercornlatest, python-adapter_hypercorn-py38-hypercorn{0010,0011,0012,0013}, python-adapter_uvicorn-{py37,py38,py39,py310,py311}-uvicorn{014,latest}, python-adapter_waitress-{py37,py38,py39,py310}-waitress02, From 989e38caec4a614a1e7ec89bb8fd1162ae1a3f89 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> Date: Fri, 3 Nov 2023 09:05:22 -0700 Subject: [PATCH 03/16] Feature bedrock cohere instrumentation (#955) * Add AWS Bedrock testing infrastructure * Squashed commit of the following: commit 
2834663794c649124052e510c1c9557a830c060a Author: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon Oct 9 17:42:05 2023 -0700 OpenAI Mock Backend (#929) * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Pin flask version for flask restx tests. (#931) * Ignore new redis methods. (#932) Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> * Remove approved paths * Update CI Image (#930) * Update available python versions in CI * Update makefile with overrides * Fix default branch detection for arm builds --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Only get package version once (#928) * Only get package version once * Add disconnect method * Add disconnect method --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add datalib dependency for embedding testing. * Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. * [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Remove approved paths * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Add datalib dependency for embedding testing. 
--------- Co-authored-by: Uma Annamalai Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: TimPansino Co-authored-by: Hannah Stepanek Co-authored-by: mergify[bot] commit db63d4598c94048986c0e00ebb2cd8827100b54c Author: Uma Annamalai Date: Mon Oct 2 15:31:38 2023 -0700 Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. * [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Squashed commit of the following: commit 182c7a8c8a91e2d0f234f7ed7d4a14a2422c8342 Author: Uma Annamalai Date: Fri Oct 13 10:12:55 2023 -0700 Add request/ response IDs. commit f6d13f822c22d2039ec32be86b2c54f9dc3de1c9 Author: Uma Annamalai Date: Thu Oct 12 13:23:39 2023 -0700 Test cleanup. commit d0576631d009e481bd5887a3243aac99b097d823 Author: Uma Annamalai Date: Tue Oct 10 10:23:00 2023 -0700 Remove commented code. commit dd29433e719482babbe5c724e7330b1f6324abd7 Author: Uma Annamalai Date: Tue Oct 10 10:19:01 2023 -0700 Add openai sync instrumentation. commit 2834663794c649124052e510c1c9557a830c060a Author: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon Oct 9 17:42:05 2023 -0700 OpenAI Mock Backend (#929) * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Pin flask version for flask restx tests. (#931) * Ignore new redis methods. 
(#932) Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> * Remove approved paths * Update CI Image (#930) * Update available python versions in CI * Update makefile with overrides * Fix default branch detection for arm builds --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Only get package version once (#928) * Only get package version once * Add disconnect method * Add disconnect method --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> * Add datalib dependency for embedding testing. * Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. * [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * Add mock external openai server * Add mocked OpenAI server fixtures * Set up recorded responses. * Clean mock server to depend on http server * Linting * Remove approved paths * Add mocking for embedding endpoint * [Mega-Linter] Apply linters fixes * Add ratelimit headers * [Mega-Linter] Apply linters fixes * Add datalib dependency for embedding testing. --------- Co-authored-by: Uma Annamalai Co-authored-by: Lalleh Rafeei <84813886+lrafeei@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: TimPansino Co-authored-by: Hannah Stepanek Co-authored-by: mergify[bot] commit db63d4598c94048986c0e00ebb2cd8827100b54c Author: Uma Annamalai Date: Mon Oct 2 15:31:38 2023 -0700 Add OpenAI Test Infrastructure (#926) * Add openai to tox * Add OpenAI test files. * Add test functions. 
* [Mega-Linter] Apply linters fixes --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] * TEMP * Bedrock titan extraction nearly complete * Cleaning up titan bedrock implementation * TEMP * Tests for bedrock passing Co-authored-by: Lalleh Rafeei * Cleaned up titan testing Co-authored-by: Lalleh Rafeei Co-authored-by: Hannah Stepanek * Parametrized bedrock testing * Add support for AI21-J2 models * Change to dynamic no conversation id events * Add cohere model * Remove openai instrumentation from this branch * Remove OpenAI from newrelic/config.py --------- Co-authored-by: Uma Annamalai Co-authored-by: Tim Pansino Co-authored-by: Lalleh Rafeei Co-authored-by: Hannah Stepanek --- newrelic/hooks/external_botocore.py | 20 ++++++ .../_mock_external_bedrock_server.py | 14 ++++ .../mlmodel_bedrock/_test_chat_completion.py | 64 +++++++++++++++++++ tests/mlmodel_bedrock/conftest.py | 2 +- tests/mlmodel_bedrock/test_chat_completion.py | 2 +- 5 files changed, 100 insertions(+), 2 deletions(-) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 018df2d320..4131e2bf77 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -140,9 +140,29 @@ def extract_bedrock_ai21_j2_model(request_body, response_body): return message_list, chat_completion_summary_dict +def extract_bedrock_cohere_model(request_body, response_body): + response_body = json.loads(response_body) + request_body = json.loads(request_body) + + message_list = [{"role": "user", "content": request_body.get("prompt", "")}] + message_list.extend( + {"role": "assistant", "content": result["text"]} for result in response_body.get("generations", []) + ) + + chat_completion_summary_dict = { + "request.max_tokens": request_body.get("max_tokens", ""), + "request.temperature": request_body.get("temperature", ""), + "response.choices.finish_reason": 
response_body["generations"][0]["finish_reason"], + "response.number_of_messages": len(message_list), + "response_id": str(response_body.get("id", "")), + } + return message_list, chat_completion_summary_dict + + MODEL_EXTRACTORS = { "amazon.titan": extract_bedrock_titan_model, "ai21.j2": extract_bedrock_ai21_j2_model, + "cohere": extract_bedrock_cohere_model, } diff --git a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py index de8a91aad3..126c663cef 100644 --- a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py +++ b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py @@ -42,6 +42,20 @@ ], }, ], + "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"content-type": "application/json", "x-amzn-requestid": "c5188fb5-dc58-4cbe-948d-af173c69ce0d"}, + { + "generations": [ + { + "finish_reason": "MAX_TOKENS", + "id": "0730f5c0-9a49-4f35-af94-cf8f77327740", + "text": " To convert 212 degrees Fahrenheit to Celsius, we can use the conversion factor that Celsius is equal to (Fahrenheit - 32) x 5/9. 
\\n\\nApplying this formula, we have:\\n212°F = (212°F - 32) x 5/9\\n= (180) x 5/9\\n= 100°C.\\n\\nTherefore, 212 degrees F", + } + ], + "id": "a9cc8ce6-50b6-40b6-bf77-cf24561d8de7", + "prompt": "What is 212 degrees Fahrenheit converted to Celsius?", + }, + ], "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ {"content-type": "application/json", "x-amzn-requestid": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e"}, { diff --git a/tests/mlmodel_bedrock/_test_chat_completion.py b/tests/mlmodel_bedrock/_test_chat_completion.py index 9ccbe5b644..17fa8549cb 100644 --- a/tests/mlmodel_bedrock/_test_chat_completion.py +++ b/tests/mlmodel_bedrock/_test_chat_completion.py @@ -1,6 +1,7 @@ chat_completion_payload_templates = { "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', + "cohere.command-text-v14": '{"prompt": "%s", "temperature": %f, "max_tokens": %d}', } chat_completion_expected_events = { @@ -132,4 +133,67 @@ }, ), ], + "cohere.command-text-v14": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "transaction_id": None, + "span_id": "span-id", + "trace_id": "trace-id", + "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", + "response_id": None, # UUID that varies with each run + "api_key_last_four_digits": "CRET", + "duration": None, # Response time varies each test run + "request.model": "cohere.command-text-v14", + "response.model": "cohere.command-text-v14", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "MAX_TOKENS", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "appName": 
"Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": " To convert 212 degrees Fahrenheit to Celsius, we can use the conversion factor that Celsius is equal to (Fahrenheit - 32) x 5/9. \\n\\nApplying this formula, we have:\\n212°F = (212°F - 32) x 5/9\\n= (180) x 5/9\\n= 100°C.\\n\\nTherefore, 212 degrees F", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "cohere.command-text-v14", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], } diff --git a/tests/mlmodel_bedrock/conftest.py b/tests/mlmodel_bedrock/conftest.py index 67ecfc46b9..b0aa70c575 100644 --- a/tests/mlmodel_bedrock/conftest.py +++ b/tests/mlmodel_bedrock/conftest.py @@ -60,7 +60,7 @@ def bedrock_server(): if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): # Use mocked Bedrock backend and prerecorded responses with MockExternalBedrockServer() as server: - client = boto3.client( + client = boto3.client( # nosec "bedrock-runtime", "us-east-1", endpoint_url="http://localhost:%d" % server.port, diff --git a/tests/mlmodel_bedrock/test_chat_completion.py b/tests/mlmodel_bedrock/test_chat_completion.py index 5747762993..50f851f92d 100644 --- a/tests/mlmodel_bedrock/test_chat_completion.py +++ 
b/tests/mlmodel_bedrock/test_chat_completion.py @@ -53,7 +53,7 @@ def is_file_payload(request): "amazon.titan-text-express-v1", "ai21.j2-mid-v1", # ("anthropic.claude-instant-v1", '{"prompt": "Human: {prompt}\n\nAssistant:", "max_tokens_to_sample": {max_tokens:d}}'), - # ("cohere.command-text-v14", '{"prompt": "{prompt}", "max_tokens": {max_tokens:d}, "temperature": {temperature:f}}'), + "cohere.command-text-v14", ], ) def model_id(request): From d478b0de7ad27ff98d4db4aebcaedeb002a9afde Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Fri, 3 Nov 2023 15:51:38 -0700 Subject: [PATCH 04/16] AWS Bedrock Embedding Instrumentation (#957) * AWS Bedrock embedding instrumentation * Correct symbol name --- newrelic/hooks/external_botocore.py | 76 +- .../_mock_external_bedrock_server.py | 3088 +++++++++++++++++ tests/mlmodel_bedrock/_test_embeddings.py | 51 + tests/mlmodel_bedrock/conftest.py | 15 + tests/mlmodel_bedrock/test_chat_completion.py | 51 +- tests/mlmodel_bedrock/test_embeddings.py | 117 + 6 files changed, 3366 insertions(+), 32 deletions(-) create mode 100644 tests/mlmodel_bedrock/_test_embeddings.py create mode 100644 tests/mlmodel_bedrock/test_embeddings.py diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 4131e2bf77..91011f2a8b 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -95,12 +95,12 @@ def create_chat_completion_message_event( transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict) -def extract_bedrock_titan_model(request_body, response_body): +def extract_bedrock_titan_text_model(request_body, response_body): response_body = json.loads(response_body) request_body = json.loads(request_body) input_tokens = response_body["inputTextTokenCount"] - completion_tokens = sum(result["tokenCount"] for result in response_body["results"]) + completion_tokens = sum(result["tokenCount"] for result in 
response_body.get("results", [])) total_tokens = input_tokens + completion_tokens request_config = request_body.get("textGenerationConfig", {}) @@ -121,6 +121,20 @@ def extract_bedrock_titan_model(request_body, response_body): return message_list, chat_completion_summary_dict +def extract_bedrock_titan_embedding_model(request_body, response_body): + response_body = json.loads(response_body) + request_body = json.loads(request_body) + + input_tokens = response_body["inputTextTokenCount"] + + embedding_dict = { + "input": request_body.get("inputText", ""), + "response.usage.prompt_tokens": input_tokens, + "response.usage.total_tokens": input_tokens, + } + return embedding_dict + + def extract_bedrock_ai21_j2_model(request_body, response_body): response_body = json.loads(response_body) request_body = json.loads(request_body) @@ -159,11 +173,12 @@ def extract_bedrock_cohere_model(request_body, response_body): return message_list, chat_completion_summary_dict -MODEL_EXTRACTORS = { - "amazon.titan": extract_bedrock_titan_model, - "ai21.j2": extract_bedrock_ai21_j2_model, - "cohere": extract_bedrock_cohere_model, -} +MODEL_EXTRACTORS = [ # Order is important here, avoiding dictionaries + ("amazon.titan-embed", extract_bedrock_titan_embedding_model), + ("amazon.titan", extract_bedrock_titan_text_model), + ("ai21.j2", extract_bedrock_ai21_j2_model), + ("cohere", extract_bedrock_cohere_model), +] @function_wrapper @@ -194,7 +209,7 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): return response # Determine extractor by model type - for extractor_name, extractor in MODEL_EXTRACTORS.items(): + for extractor_name, extractor in MODEL_EXTRACTORS: if model.startswith(extractor_name): break else: @@ -213,7 +228,45 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): # Read and replace response streaming bodies response_body = response["body"].read() response["body"] = StreamingBody(BytesIO(response_body), len(response_body)) + 
response_headers = response["ResponseMetadata"]["HTTPHeaders"] + + if model.startswith("amazon.titan-embed"): # Only available embedding models + handle_embedding_event(instance, transaction, extractor, model, response_body, response_headers, request_body, ft.duration) + else: + handle_chat_completion_event(instance, transaction, extractor, model, response_body, response_headers, request_body, ft.duration) + + return response +def handle_embedding_event(client, transaction, extractor, model, response_body, response_headers, request_body, duration): + embedding_id = str(uuid.uuid4()) + available_metadata = get_trace_linking_metadata() + span_id = available_metadata.get("span.id", "") + trace_id = available_metadata.get("trace.id", "") + + request_id = response_headers.get("x-amzn-requestid", "") + settings = transaction.settings if transaction.settings is not None else global_settings() + + embedding_dict = extractor(request_body, response_body) + + embedding_dict.update({ + "vendor": "bedrock", + "ingest_source": "Python", + "id": embedding_id, + "appName": settings.app_name, + "span_id": span_id, + "trace_id": trace_id, + "request_id": request_id, + "transaction_id": transaction._transaction_id, + "api_key_last_four_digits": client._request_signer._credentials.access_key[-4:], + "duration": duration, + "request.model": model, + "response.model": model, + }) + + transaction.record_ml_event("LlmEmbedding", embedding_dict) + + +def handle_chat_completion_event(client, transaction, extractor, model, response_body, response_headers, request_body, duration): custom_attrs_dict = transaction._custom_params conversation_id = custom_attrs_dict.get("conversation_id", "") @@ -222,7 +275,6 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): span_id = available_metadata.get("span.id", "") trace_id = available_metadata.get("trace.id", "") - response_headers = response["ResponseMetadata"]["HTTPHeaders"] request_id = response_headers.get("x-amzn-requestid", 
"") settings = transaction.settings if transaction.settings is not None else global_settings() @@ -232,7 +284,7 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): { "vendor": "bedrock", "ingest_source": "Python", - "api_key_last_four_digits": instance._request_signer._credentials.access_key[-4:], + "api_key_last_four_digits": client._request_signer._credentials.access_key[-4:], "id": chat_completion_id, "appName": settings.app_name, "conversation_id": conversation_id, @@ -240,7 +292,7 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): "trace_id": trace_id, "transaction_id": transaction._transaction_id, "request_id": request_id, - "duration": ft.duration, + "duration": duration, "request.model": model, "response.model": model, # Duplicate data required by the UI } @@ -261,8 +313,6 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): response_id=response_id, ) - return response - CUSTOM_TRACE_POINTS = { ("sns", "publish"): message_trace("SNS", "Produce", "Topic", extract(("TopicArn", "TargetArn"), "PhoneNumber")), diff --git a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py index 126c663cef..96598a02a5 100644 --- a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py +++ b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py @@ -230,6 +230,3094 @@ ], }, ], + "amazon.titan-embed-text-v1::This is an embedding test.": [ + {"content-type": "application/json", "x-amzn-requestid": "75f1d3fe-6cde-4cf5-bdaf-7101f746ccfe"}, + { + "embedding": [ + -0.14160156, + 0.034423828, + 0.54296875, + 0.10986328, + 0.053466797, + 0.3515625, + 0.12988281, + -0.0002708435, + -0.21484375, + 0.060302734, + 0.58984375, + -0.5859375, + 0.52734375, + 0.82421875, + -0.91015625, + -0.19628906, + 0.45703125, + 0.609375, + -0.67578125, + 0.39453125, + -0.46875, + -0.25390625, + -0.21191406, + 0.114746094, + 0.31640625, + -0.41015625, + -0.32617188, + -0.43554688, 
+ 0.4765625, + -0.4921875, + 0.40429688, + 0.06542969, + 0.859375, + -0.008056641, + -0.19921875, + 0.072753906, + 0.33203125, + 0.69921875, + 0.39453125, + 0.15527344, + 0.08886719, + -0.25, + 0.859375, + 0.22949219, + -0.19042969, + 0.13769531, + -0.078125, + 0.41210938, + 0.875, + 0.5234375, + 0.59765625, + -0.22949219, + -0.22558594, + -0.47460938, + 0.37695312, + 0.51953125, + -0.5703125, + 0.46679688, + 0.43554688, + 0.17480469, + -0.080566406, + -0.16699219, + -0.734375, + -1.0625, + -0.33984375, + 0.390625, + -0.18847656, + -0.5234375, + -0.48828125, + 0.44921875, + -0.09814453, + -0.3359375, + 0.087402344, + 0.36914062, + 1.3203125, + 0.25585938, + 0.14746094, + -0.059570312, + -0.15820312, + -0.037353516, + -0.61328125, + -0.6484375, + -0.35351562, + 0.55078125, + -0.26953125, + 0.90234375, + 0.3671875, + 0.31054688, + 0.00014019012, + -0.171875, + 0.025512695, + 0.5078125, + 0.11621094, + 0.33203125, + 0.8125, + -0.3046875, + -1.078125, + -0.5703125, + 0.26171875, + -0.4609375, + 0.203125, + 0.44726562, + -0.5078125, + 0.41601562, + -0.1953125, + 0.028930664, + -0.57421875, + 0.2265625, + 0.13574219, + -0.040039062, + -0.22949219, + -0.515625, + -0.19042969, + -0.30078125, + 0.10058594, + -0.66796875, + 0.6015625, + 0.296875, + -0.765625, + -0.87109375, + 0.2265625, + 0.068847656, + -0.088378906, + -0.1328125, + -0.796875, + -0.37304688, + 0.47460938, + -0.3515625, + -0.8125, + -0.32226562, + 0.265625, + 0.3203125, + -0.4140625, + -0.49023438, + 0.859375, + -0.19140625, + -0.6328125, + 0.10546875, + -0.5625, + 0.66015625, + 0.26171875, + -0.2109375, + 0.421875, + -0.82421875, + 0.29296875, + 0.17773438, + 0.24023438, + 0.5078125, + -0.49804688, + -0.10205078, + 0.10498047, + -0.36132812, + -0.47460938, + -0.20996094, + 0.010070801, + -0.546875, + 0.66796875, + -0.123046875, + -0.75390625, + 0.19628906, + 0.17480469, + 0.18261719, + -0.96875, + -0.26171875, + 0.4921875, + -0.40039062, + 0.296875, + 0.1640625, + -0.20507812, + -0.36132812, + 0.76171875, + 
-1.234375, + -0.625, + 0.060058594, + -0.09375, + -0.14746094, + 1.09375, + 0.057861328, + 0.22460938, + -0.703125, + 0.07470703, + 0.23828125, + -0.083984375, + -0.54296875, + 0.5546875, + -0.5, + -0.390625, + 0.106933594, + 0.6640625, + 0.27734375, + -0.953125, + 0.35351562, + -0.7734375, + -0.77734375, + 0.16503906, + -0.42382812, + 0.36914062, + 0.020141602, + -1.3515625, + 0.18847656, + 0.13476562, + -0.034179688, + -0.03930664, + -0.03857422, + -0.027954102, + 0.73828125, + -0.18945312, + -0.09814453, + -0.46289062, + 0.36914062, + 0.033203125, + 0.020874023, + -0.703125, + 0.91796875, + 0.38671875, + 0.625, + -0.19335938, + -0.16796875, + -0.58203125, + 0.21386719, + -0.032470703, + -0.296875, + -0.15625, + -0.1640625, + -0.74609375, + 0.328125, + 0.5546875, + -0.1953125, + 1.0546875, + 0.171875, + -0.099609375, + 0.5234375, + 0.05078125, + -0.35742188, + -0.2734375, + -1.3203125, + -0.8515625, + -0.16015625, + 0.01574707, + 0.29296875, + 0.18457031, + -0.265625, + 0.048339844, + 0.045654297, + -0.32226562, + 0.087890625, + -0.0047302246, + 0.38671875, + 0.10644531, + -0.06225586, + 1.03125, + 0.94140625, + -0.3203125, + 0.20800781, + -1.171875, + 0.48046875, + -0.091796875, + 0.20800781, + -0.1328125, + -0.20507812, + 0.28125, + -0.47070312, + -0.09033203, + 0.0013809204, + -0.08203125, + 0.43359375, + -0.03100586, + -0.060791016, + -0.53515625, + -1.46875, + 0.000101566315, + 0.515625, + 0.40625, + -0.10498047, + -0.15820312, + -0.009460449, + -0.77734375, + -0.5859375, + 0.9765625, + 0.099609375, + 0.51953125, + 0.38085938, + -0.09667969, + -0.100097656, + -0.5, + -1.3125, + -0.18066406, + -0.099121094, + 0.26171875, + -0.14453125, + -0.546875, + 0.17578125, + 0.484375, + 0.765625, + 0.45703125, + 0.2734375, + 0.0028076172, + 0.17089844, + -0.32421875, + -0.37695312, + 0.30664062, + -0.48046875, + 0.07128906, + 0.031982422, + -0.31054688, + -0.055419922, + -0.29296875, + 0.3359375, + -0.296875, + 0.47851562, + -0.05126953, + 0.18457031, + -0.01953125, + 
-0.35742188, + 0.017944336, + -0.25, + 0.10595703, + 0.17382812, + -0.73828125, + 0.36914062, + -0.15234375, + -0.8125, + 0.17382812, + 0.048095703, + 0.5625, + -0.33789062, + 0.023071289, + -0.21972656, + 0.16015625, + 0.032958984, + -1.1171875, + -0.984375, + 0.83984375, + 0.009033203, + -0.042236328, + -0.46484375, + -0.08203125, + 0.44726562, + -0.765625, + -0.3984375, + -0.40820312, + -0.234375, + 0.044189453, + 0.119628906, + -0.7578125, + -0.55078125, + -0.4453125, + 0.7578125, + 0.34960938, + 0.96484375, + 0.35742188, + 0.36914062, + -0.35351562, + -0.36132812, + 1.109375, + 0.5859375, + 0.85546875, + -0.10644531, + -0.6953125, + -0.0066833496, + 0.042236328, + -0.06689453, + 0.36914062, + 0.9765625, + -0.3046875, + 0.59765625, + -0.6640625, + 0.21484375, + -0.07128906, + 1.1328125, + -0.51953125, + 0.86328125, + -0.11328125, + 0.15722656, + -0.36328125, + -0.04638672, + 1.4375, + 0.18457031, + -0.18359375, + 0.10595703, + -0.49023438, + -0.07324219, + -0.73046875, + -0.119140625, + 0.021118164, + 0.4921875, + -0.46875, + 0.28710938, + 0.3359375, + 0.11767578, + -0.2109375, + -0.14550781, + 0.39648438, + -0.27734375, + 0.48046875, + 0.12988281, + 0.45507812, + -0.375, + -0.84765625, + 0.25585938, + -0.36523438, + 0.8046875, + 0.42382812, + -0.24511719, + 0.54296875, + 0.71875, + 0.010009766, + -0.04296875, + 0.083984375, + -0.52734375, + 0.13964844, + -0.27539062, + -0.30273438, + 1.1484375, + -0.515625, + -0.19335938, + 0.58984375, + 0.049072266, + 0.703125, + -0.04272461, + 0.5078125, + 0.34960938, + -0.3359375, + -0.47460938, + 0.049316406, + 0.36523438, + 0.7578125, + -0.022827148, + -0.71484375, + 0.21972656, + 0.09716797, + -0.203125, + -0.36914062, + 1.34375, + 0.34179688, + 0.46679688, + 1.078125, + 0.26171875, + 0.41992188, + 0.22363281, + -0.515625, + -0.5703125, + 0.13378906, + 0.26757812, + -0.22558594, + -0.5234375, + 0.06689453, + 0.08251953, + -0.625, + 0.16796875, + 0.43164062, + -0.55859375, + 0.28125, + 0.078125, + 0.6328125, + 0.23242188, 
+ -0.064941406, + -0.004486084, + -0.20703125, + 0.2734375, + 0.453125, + -0.734375, + 0.04272461, + 0.36132812, + -0.19628906, + -0.12402344, + 1.3515625, + 0.25585938, + 0.4921875, + -0.29296875, + -0.58984375, + 0.021240234, + -0.044677734, + 0.7578125, + -0.7890625, + 0.10253906, + -0.15820312, + -0.5078125, + -0.39453125, + -0.453125, + 0.35742188, + 0.921875, + 0.44335938, + -0.49804688, + 0.44335938, + 0.31445312, + 0.58984375, + -1.0078125, + -0.22460938, + 0.24121094, + 0.87890625, + 0.66015625, + -0.390625, + -0.05053711, + 0.059570312, + 0.36132812, + -0.00038719177, + -0.017089844, + 0.62890625, + 0.203125, + 0.17480469, + 0.025512695, + 0.47460938, + 0.3125, + 1.140625, + 0.32421875, + -0.057861328, + 0.36914062, + -0.7265625, + -0.51953125, + 0.26953125, + 0.42773438, + 0.064453125, + 0.6328125, + 0.27148438, + -0.11767578, + 0.66796875, + -0.38671875, + 0.5234375, + -0.59375, + 0.5078125, + 0.008239746, + -0.34179688, + -0.27539062, + 0.5234375, + 1.296875, + 0.29492188, + -0.010986328, + -0.41210938, + 0.59375, + 0.061767578, + -0.33398438, + -2.03125, + 0.87890625, + -0.010620117, + 0.53125, + 0.14257812, + -0.515625, + -1.03125, + 0.578125, + 0.1875, + 0.44335938, + -0.33203125, + -0.36328125, + -0.3203125, + 0.29296875, + -0.8203125, + 0.41015625, + -0.48242188, + 0.66015625, + 0.5625, + -0.16503906, + -0.54296875, + -0.38085938, + 0.26171875, + 0.62109375, + 0.29101562, + -0.31054688, + 0.23730469, + -0.8515625, + 0.5234375, + 0.15332031, + 0.52734375, + -0.079589844, + -0.080566406, + -0.15527344, + -0.022827148, + 0.030517578, + -0.1640625, + -0.421875, + 0.09716797, + 0.03930664, + -0.055908203, + -0.546875, + -0.47851562, + 0.091796875, + 0.32226562, + -0.94140625, + -0.04638672, + -1.203125, + -0.39648438, + 0.45507812, + 0.296875, + -0.45703125, + 0.37890625, + -0.122558594, + 0.28320312, + -0.01965332, + -0.11669922, + -0.34570312, + -0.53515625, + -0.091308594, + -0.9375, + -0.32617188, + 0.095214844, + -0.4765625, + 0.37890625, + 
-0.859375, + 1.1015625, + -0.08935547, + 0.46484375, + -0.19238281, + 0.7109375, + 0.040039062, + -0.5390625, + 0.22363281, + -0.70703125, + 0.4921875, + -0.119140625, + -0.26757812, + -0.08496094, + 0.0859375, + -0.00390625, + -0.013366699, + -0.03955078, + 0.07421875, + -0.13085938, + 0.29101562, + -0.12109375, + 0.45703125, + 0.021728516, + 0.38671875, + -0.3671875, + -0.52734375, + -0.115722656, + 0.125, + 0.5703125, + -1.234375, + 0.06298828, + -0.55859375, + 0.60546875, + 0.8125, + -0.0032958984, + -0.068359375, + -0.21191406, + 0.56640625, + 0.17285156, + -0.3515625, + 0.36328125, + -0.99609375, + 0.43554688, + -0.1015625, + 0.07080078, + -0.66796875, + 1.359375, + 0.41601562, + 0.15917969, + 0.17773438, + -0.28710938, + 0.021850586, + -0.46289062, + 0.17578125, + -0.03955078, + -0.026855469, + 0.5078125, + -0.65625, + 0.0012512207, + 0.044433594, + -0.18652344, + 0.4921875, + -0.75390625, + 0.0072021484, + 0.4375, + -0.31445312, + 0.20214844, + 0.15039062, + -0.63671875, + -0.296875, + -0.375, + -0.027709961, + 0.013427734, + 0.17089844, + 0.89453125, + 0.11621094, + -0.43945312, + -0.30859375, + 0.02709961, + 0.23242188, + -0.64453125, + -0.859375, + 0.22167969, + -0.023071289, + -0.052734375, + 0.3671875, + -0.18359375, + 0.81640625, + -0.11816406, + 0.028320312, + 0.19042969, + 0.012817383, + -0.43164062, + 0.55859375, + -0.27929688, + 0.14257812, + -0.140625, + -0.048583984, + -0.014526367, + 0.35742188, + 0.22753906, + 0.13183594, + 0.04638672, + 0.03930664, + -0.29296875, + -0.2109375, + -0.16308594, + -0.48046875, + -0.13378906, + -0.39257812, + 0.29296875, + -0.047851562, + -0.5546875, + 0.08300781, + -0.14941406, + -0.07080078, + 0.12451172, + 0.1953125, + -0.51171875, + -0.048095703, + 0.1953125, + -0.37695312, + 0.46875, + -0.084472656, + 0.19042969, + -0.39453125, + 0.69921875, + -0.0065307617, + 0.25390625, + -0.16992188, + -0.5078125, + 0.016845703, + 0.27929688, + -0.22070312, + 0.671875, + 0.18652344, + 0.25, + -0.046875, + -0.012023926, + 
-0.36523438, + 0.36523438, + -0.11279297, + 0.421875, + 0.079589844, + -0.100097656, + 0.37304688, + 0.29882812, + -0.10546875, + -0.36523438, + 0.040039062, + 0.546875, + 0.12890625, + -0.06542969, + -0.38085938, + -0.35742188, + -0.6484375, + -0.28515625, + 0.0107421875, + -0.055664062, + 0.45703125, + 0.33984375, + 0.26367188, + -0.23144531, + 0.012878418, + -0.875, + 0.11035156, + 0.33984375, + 0.203125, + 0.38867188, + 0.24902344, + -0.37304688, + -0.98046875, + -0.122558594, + -0.17871094, + -0.09277344, + 0.1796875, + 0.4453125, + -0.66796875, + 0.78515625, + 0.12988281, + 0.35546875, + 0.44140625, + 0.58984375, + 0.29492188, + 0.7734375, + -0.21972656, + -0.40234375, + -0.22265625, + 0.18359375, + 0.54296875, + 0.17382812, + 0.59375, + -0.390625, + -0.92578125, + -0.017456055, + -0.25, + 0.73828125, + 0.7578125, + -0.3828125, + -0.25976562, + 0.049072266, + 0.046875, + -0.3515625, + 0.30078125, + -1.03125, + -0.48828125, + 0.0017929077, + -0.26171875, + 0.20214844, + 0.29882812, + 0.064941406, + 0.21484375, + -0.55078125, + -0.021362305, + 0.12988281, + 0.27148438, + 0.38867188, + -0.19726562, + -0.55078125, + 0.1640625, + 0.32226562, + -0.72265625, + 0.36132812, + 1.21875, + -0.22070312, + -0.32421875, + -0.29882812, + 0.0024414062, + 0.19921875, + 0.734375, + 0.16210938, + 0.17871094, + -0.19140625, + 0.38476562, + -0.06591797, + -0.47070312, + -0.040039062, + -0.33007812, + -0.07910156, + -0.2890625, + 0.00970459, + 0.12695312, + -0.12060547, + -0.18847656, + 1.015625, + -0.032958984, + 0.12451172, + -0.38476562, + 0.063964844, + 1.0859375, + 0.067871094, + -0.24511719, + 0.125, + 0.10546875, + -0.22460938, + -0.29101562, + 0.24414062, + -0.017944336, + -0.15625, + -0.60546875, + -0.25195312, + -0.46875, + 0.80859375, + -0.34960938, + 0.42382812, + 0.796875, + 0.296875, + -0.067871094, + 0.39453125, + 0.07470703, + 0.033935547, + 0.24414062, + 0.32617188, + 0.023925781, + 0.73046875, + 0.2109375, + -0.43164062, + 0.14453125, + 0.63671875, + 0.21972656, + 
-0.1875, + -0.18066406, + -0.22167969, + -1.3359375, + 0.52734375, + -0.40625, + -0.12988281, + 0.17480469, + -0.18066406, + 0.58984375, + -0.32421875, + -0.13476562, + 0.39257812, + -0.19238281, + 0.068359375, + 0.7265625, + -0.7109375, + -0.125, + 0.328125, + 0.34179688, + -0.48828125, + -0.10058594, + -0.83984375, + 0.30273438, + 0.008239746, + -1.390625, + 0.171875, + 0.34960938, + 0.44921875, + 0.22167969, + 0.60546875, + -0.36914062, + -0.028808594, + -0.19921875, + 0.6875, + 0.52734375, + -0.07421875, + 0.35546875, + 0.546875, + 0.08691406, + 0.23339844, + -0.984375, + -0.20507812, + 0.08544922, + 0.453125, + -0.07421875, + -0.953125, + 0.74609375, + -0.796875, + 0.47851562, + 0.81640625, + -0.44921875, + -0.33398438, + -0.54296875, + 0.46484375, + -0.390625, + -0.24121094, + -0.0115356445, + 1.1328125, + 1.0390625, + 0.6484375, + 0.35742188, + -0.29492188, + -0.0007095337, + -0.060302734, + 0.21777344, + 0.15136719, + -0.6171875, + 0.11328125, + -0.025878906, + 0.19238281, + 0.140625, + 0.171875, + 0.25195312, + 0.10546875, + 0.0008354187, + -0.13476562, + -0.26953125, + 0.025024414, + -0.28320312, + -0.107910156, + 1.015625, + 0.05493164, + -0.12988281, + 0.30859375, + 0.22558594, + -0.60546875, + 0.11328125, + -1.203125, + 0.6484375, + 0.087402344, + 0.32226562, + 0.63671875, + -0.07714844, + -1.390625, + -0.71875, + -0.34179688, + -0.10546875, + -0.37304688, + -0.09863281, + -0.41210938, + -0.14941406, + 0.41210938, + -0.20898438, + 0.18261719, + 0.67578125, + 0.41601562, + 0.32617188, + 0.2421875, + -0.14257812, + -0.6796875, + 0.01953125, + 0.34179688, + 0.20800781, + -0.123046875, + 0.087402344, + 0.85546875, + 0.33984375, + 0.33203125, + -0.68359375, + 0.44921875, + 0.50390625, + 0.083496094, + 0.10888672, + -0.09863281, + 0.55078125, + 0.09765625, + -0.50390625, + 0.13378906, + -0.29882812, + 0.030761719, + -0.64453125, + 0.22949219, + 0.43945312, + 0.16503906, + 0.10888672, + -0.12792969, + -0.039794922, + -0.111328125, + -0.35742188, + 
0.053222656, + -0.78125, + -0.4375, + 0.359375, + -0.88671875, + -0.21972656, + -0.053710938, + 0.91796875, + -0.10644531, + 0.55859375, + -0.7734375, + 0.5078125, + 0.46484375, + 0.32226562, + 0.16796875, + -0.28515625, + 0.045410156, + -0.45117188, + 0.38867188, + -0.33398438, + -0.5234375, + 0.296875, + 0.6015625, + 0.3515625, + -0.734375, + 0.3984375, + -0.08251953, + 0.359375, + -0.28515625, + -0.88671875, + 0.0051879883, + 0.045166016, + -0.7421875, + -0.36523438, + 0.140625, + 0.18066406, + -0.171875, + -0.15625, + -0.53515625, + 0.2421875, + -0.19140625, + -0.18066406, + 0.25390625, + 0.6875, + -0.01965332, + -0.33203125, + 0.29492188, + 0.107421875, + -0.048339844, + -0.82421875, + 0.52734375, + 0.78125, + 0.8203125, + -0.90625, + 0.765625, + 0.0390625, + 0.045410156, + 0.26367188, + -0.14355469, + -0.26367188, + 0.390625, + -0.10888672, + 0.33007812, + -0.5625, + 0.08105469, + -0.13769531, + 0.8515625, + -0.14453125, + 0.77734375, + -0.48046875, + -0.3515625, + -0.25390625, + -0.09277344, + 0.23925781, + -0.022338867, + -0.45898438, + 0.36132812, + -0.23828125, + 0.265625, + -0.48632812, + -0.46875, + -0.75390625, + 1.3125, + 0.78125, + -0.63671875, + -1.21875, + 0.5078125, + -0.27734375, + -0.118652344, + 0.041992188, + -0.14648438, + -0.8046875, + 0.21679688, + -0.79296875, + 0.28320312, + -0.09667969, + 0.42773438, + 0.49414062, + 0.44726562, + 0.21972656, + -0.02746582, + -0.03540039, + -0.14941406, + -0.515625, + -0.27929688, + 0.9609375, + -0.007598877, + 0.34765625, + -0.060546875, + -0.44726562, + 0.7421875, + 0.15332031, + 0.45117188, + -0.4921875, + 0.07080078, + 0.5625, + 0.3984375, + -0.20019531, + 0.014892578, + 0.63671875, + -0.0071411133, + 0.016357422, + 1.0625, + 0.049316406, + 0.18066406, + 0.09814453, + -0.52734375, + -0.359375, + -0.072265625, + -0.41992188, + 0.39648438, + 0.38671875, + -0.30273438, + -0.056640625, + -0.640625, + -0.44921875, + 0.49414062, + 0.29101562, + 0.49609375, + 0.40429688, + -0.10205078, + 0.49414062, + 
-0.28125, + -0.12695312, + -0.0022735596, + -0.37304688, + 0.122558594, + 0.07519531, + -0.12597656, + -0.38085938, + -0.19824219, + -0.40039062, + 0.56640625, + -1.140625, + -0.515625, + -0.17578125, + -0.765625, + -0.43945312, + 0.3359375, + -0.24707031, + 0.32617188, + -0.45117188, + -0.37109375, + 0.45117188, + -0.27539062, + -0.38867188, + 0.09082031, + 0.17675781, + 0.49414062, + 0.19921875, + 0.17480469, + 0.8515625, + -0.23046875, + -0.234375, + -0.28515625, + 0.10253906, + 0.29101562, + -0.3359375, + -0.203125, + 0.6484375, + 0.11767578, + -0.20214844, + -0.42382812, + 0.26367188, + 0.6328125, + 0.0059509277, + 0.08691406, + -1.5625, + -0.43554688, + 0.17675781, + 0.091796875, + -0.5234375, + -0.09863281, + 0.20605469, + 0.16601562, + -0.578125, + 0.017700195, + 0.41015625, + 1.03125, + -0.55078125, + 0.21289062, + -0.35351562, + 0.24316406, + -0.123535156, + 0.11035156, + -0.48242188, + -0.34179688, + 0.45117188, + 0.3125, + -0.071777344, + 0.12792969, + 0.55859375, + 0.063964844, + -0.21191406, + 0.01965332, + -1.359375, + -0.21582031, + -0.019042969, + 0.16308594, + -0.3671875, + -0.40625, + -1.0234375, + -0.21289062, + 0.24023438, + -0.28125, + 0.26953125, + -0.14550781, + -0.087890625, + 0.16113281, + -0.49804688, + -0.17675781, + -0.890625, + 0.27929688, + 0.484375, + 0.27148438, + 0.11816406, + 0.83984375, + 0.029052734, + -0.890625, + 0.66796875, + 0.78515625, + -0.953125, + 0.49414062, + -0.546875, + 0.106933594, + -0.08251953, + 0.2890625, + -0.1484375, + -0.85546875, + 0.32421875, + -0.0040893555, + -0.16601562, + -0.16699219, + 0.24414062, + -0.5078125, + 0.25390625, + -0.10253906, + 0.15625, + 0.140625, + -0.27539062, + -0.546875, + -0.5546875, + -0.71875, + 0.37304688, + 0.060058594, + -0.076171875, + 0.44921875, + 0.06933594, + -0.28710938, + -0.22949219, + 0.17578125, + 0.09814453, + 0.4765625, + -0.95703125, + -0.03540039, + 0.21289062, + -0.7578125, + -0.07373047, + 0.10546875, + 0.07128906, + 0.76171875, + 0.4296875, + -0.09375, + 
0.27539062, + -0.55078125, + 0.29882812, + -0.42382812, + 0.32617188, + -0.39648438, + 0.12451172, + 0.16503906, + -0.22460938, + -0.65625, + -0.022094727, + 0.61328125, + -0.024780273, + 0.62109375, + -0.033447266, + 0.515625, + 0.12890625, + -0.21875, + -0.08642578, + 0.49804688, + -0.2265625, + -0.29296875, + 0.19238281, + 0.3515625, + -1.265625, + 0.57421875, + 0.20117188, + -0.28320312, + 0.1953125, + -0.30664062, + 0.2265625, + -0.11230469, + 0.83984375, + 0.111328125, + 0.265625, + 0.71484375, + -0.625, + 0.38867188, + 0.47070312, + -0.32617188, + -0.171875, + 1.0078125, + 0.19726562, + -0.118652344, + 0.63671875, + -0.068359375, + -0.25585938, + 0.4140625, + -0.29296875, + 0.21386719, + -0.064453125, + 0.15820312, + -0.89453125, + -0.16308594, + 0.48046875, + 0.14648438, + -0.5703125, + 0.84765625, + -0.19042969, + 0.03515625, + 0.42578125, + -0.27539062, + -0.5390625, + 0.95703125, + 0.2734375, + 0.16699219, + -0.328125, + 0.11279297, + 0.003250122, + 0.47265625, + -0.31640625, + 0.546875, + 0.55859375, + 0.06933594, + -0.61328125, + -0.16210938, + -0.375, + 0.100097656, + -0.088378906, + 0.12695312, + 0.079589844, + 0.123535156, + -1.0078125, + 0.6875, + 0.022949219, + -0.40039062, + -0.09863281, + 0.29101562, + -1.2890625, + -0.20996094, + 0.36328125, + -0.3515625, + 0.7890625, + 0.12207031, + 0.48046875, + -0.13671875, + -0.041015625, + 0.19824219, + 0.19921875, + 0.01171875, + -0.37695312, + -0.62890625, + 0.9375, + -0.671875, + 0.24609375, + 0.6484375, + -0.29101562, + 0.076171875, + 0.62109375, + -0.5546875, + 0.36523438, + 0.75390625, + -0.19140625, + -0.875, + -0.8203125, + -0.24414062, + -0.625, + 0.1796875, + -0.40039062, + 0.25390625, + -0.14550781, + -0.21679688, + -0.828125, + 0.3359375, + 0.43554688, + 0.55078125, + -0.44921875, + -0.28710938, + 0.24023438, + 0.18066406, + -0.6953125, + 0.020385742, + -0.11376953, + 0.13867188, + -0.92578125, + 0.33398438, + -0.328125, + 0.78125, + -0.45507812, + -0.07470703, + 0.34179688, + 0.07080078, + 
0.76171875, + 0.37890625, + -0.10644531, + 0.90234375, + -0.21875, + -0.15917969, + -0.36132812, + 0.2109375, + -0.45703125, + -0.76953125, + 0.21289062, + 0.26367188, + 0.49804688, + 0.35742188, + -0.20019531, + 0.31054688, + 0.34179688, + 0.17089844, + -0.15429688, + 0.39648438, + -0.5859375, + 0.20996094, + -0.40039062, + 0.5703125, + -0.515625, + 0.5234375, + 0.049560547, + 0.328125, + 0.24804688, + 0.42578125, + 0.609375, + 0.19238281, + 0.27929688, + 0.19335938, + 0.78125, + -0.9921875, + 0.23925781, + -1.3828125, + -0.22949219, + -0.578125, + -0.13964844, + -0.17382812, + -0.011169434, + 0.26171875, + -0.73046875, + -1.4375, + 0.6953125, + -0.7421875, + 0.052246094, + 0.12207031, + 1.3046875, + 0.38867188, + 0.040283203, + -0.546875, + -0.0021514893, + 0.18457031, + -0.5546875, + -0.51171875, + -0.16308594, + -0.104003906, + -0.38867188, + -0.20996094, + -0.8984375, + 0.6015625, + -0.30078125, + -0.13769531, + 0.16113281, + 0.58203125, + -0.23730469, + -0.125, + -1.0234375, + 0.875, + -0.7109375, + 0.29101562, + 0.09667969, + -0.3203125, + -0.48046875, + 0.37890625, + 0.734375, + -0.28710938, + -0.29882812, + -0.05493164, + 0.34765625, + -0.84375, + 0.65625, + 0.578125, + -0.20019531, + 0.13769531, + 0.10058594, + -0.37109375, + 0.36523438, + -0.22167969, + 0.72265625, + ], + "inputTextTokenCount": 6, + }, + ], + "amazon.titan-embed-g1-text-02::This is an embedding test.": [ + {"content-type": "application/json", "x-amzn-requestid": "f7e78265-6b7c-4b3a-b750-0c1d00347258"}, + { + "embedding": [ + -0.14160156, + 0.034423828, + 0.54296875, + 0.10986328, + 0.053466797, + 0.3515625, + 0.12988281, + -0.0002708435, + -0.21484375, + 0.060302734, + 0.58984375, + -0.5859375, + 0.52734375, + 0.82421875, + -0.91015625, + -0.19628906, + 0.45703125, + 0.609375, + -0.67578125, + 0.39453125, + -0.46875, + -0.25390625, + -0.21191406, + 0.114746094, + 0.31640625, + -0.41015625, + -0.32617188, + -0.43554688, + 0.4765625, + -0.4921875, + 0.40429688, + 0.06542969, + 0.859375, + 
-0.008056641, + -0.19921875, + 0.072753906, + 0.33203125, + 0.69921875, + 0.39453125, + 0.15527344, + 0.08886719, + -0.25, + 0.859375, + 0.22949219, + -0.19042969, + 0.13769531, + -0.078125, + 0.41210938, + 0.875, + 0.5234375, + 0.59765625, + -0.22949219, + -0.22558594, + -0.47460938, + 0.37695312, + 0.51953125, + -0.5703125, + 0.46679688, + 0.43554688, + 0.17480469, + -0.080566406, + -0.16699219, + -0.734375, + -1.0625, + -0.33984375, + 0.390625, + -0.18847656, + -0.5234375, + -0.48828125, + 0.44921875, + -0.09814453, + -0.3359375, + 0.087402344, + 0.36914062, + 1.3203125, + 0.25585938, + 0.14746094, + -0.059570312, + -0.15820312, + -0.037353516, + -0.61328125, + -0.6484375, + -0.35351562, + 0.55078125, + -0.26953125, + 0.90234375, + 0.3671875, + 0.31054688, + 0.00014019012, + -0.171875, + 0.025512695, + 0.5078125, + 0.11621094, + 0.33203125, + 0.8125, + -0.3046875, + -1.078125, + -0.5703125, + 0.26171875, + -0.4609375, + 0.203125, + 0.44726562, + -0.5078125, + 0.41601562, + -0.1953125, + 0.028930664, + -0.57421875, + 0.2265625, + 0.13574219, + -0.040039062, + -0.22949219, + -0.515625, + -0.19042969, + -0.30078125, + 0.10058594, + -0.66796875, + 0.6015625, + 0.296875, + -0.765625, + -0.87109375, + 0.2265625, + 0.068847656, + -0.088378906, + -0.1328125, + -0.796875, + -0.37304688, + 0.47460938, + -0.3515625, + -0.8125, + -0.32226562, + 0.265625, + 0.3203125, + -0.4140625, + -0.49023438, + 0.859375, + -0.19140625, + -0.6328125, + 0.10546875, + -0.5625, + 0.66015625, + 0.26171875, + -0.2109375, + 0.421875, + -0.82421875, + 0.29296875, + 0.17773438, + 0.24023438, + 0.5078125, + -0.49804688, + -0.10205078, + 0.10498047, + -0.36132812, + -0.47460938, + -0.20996094, + 0.010070801, + -0.546875, + 0.66796875, + -0.123046875, + -0.75390625, + 0.19628906, + 0.17480469, + 0.18261719, + -0.96875, + -0.26171875, + 0.4921875, + -0.40039062, + 0.296875, + 0.1640625, + -0.20507812, + -0.36132812, + 0.76171875, + -1.234375, + -0.625, + 0.060058594, + -0.09375, + -0.14746094, + 
1.09375, + 0.057861328, + 0.22460938, + -0.703125, + 0.07470703, + 0.23828125, + -0.083984375, + -0.54296875, + 0.5546875, + -0.5, + -0.390625, + 0.106933594, + 0.6640625, + 0.27734375, + -0.953125, + 0.35351562, + -0.7734375, + -0.77734375, + 0.16503906, + -0.42382812, + 0.36914062, + 0.020141602, + -1.3515625, + 0.18847656, + 0.13476562, + -0.034179688, + -0.03930664, + -0.03857422, + -0.027954102, + 0.73828125, + -0.18945312, + -0.09814453, + -0.46289062, + 0.36914062, + 0.033203125, + 0.020874023, + -0.703125, + 0.91796875, + 0.38671875, + 0.625, + -0.19335938, + -0.16796875, + -0.58203125, + 0.21386719, + -0.032470703, + -0.296875, + -0.15625, + -0.1640625, + -0.74609375, + 0.328125, + 0.5546875, + -0.1953125, + 1.0546875, + 0.171875, + -0.099609375, + 0.5234375, + 0.05078125, + -0.35742188, + -0.2734375, + -1.3203125, + -0.8515625, + -0.16015625, + 0.01574707, + 0.29296875, + 0.18457031, + -0.265625, + 0.048339844, + 0.045654297, + -0.32226562, + 0.087890625, + -0.0047302246, + 0.38671875, + 0.10644531, + -0.06225586, + 1.03125, + 0.94140625, + -0.3203125, + 0.20800781, + -1.171875, + 0.48046875, + -0.091796875, + 0.20800781, + -0.1328125, + -0.20507812, + 0.28125, + -0.47070312, + -0.09033203, + 0.0013809204, + -0.08203125, + 0.43359375, + -0.03100586, + -0.060791016, + -0.53515625, + -1.46875, + 0.000101566315, + 0.515625, + 0.40625, + -0.10498047, + -0.15820312, + -0.009460449, + -0.77734375, + -0.5859375, + 0.9765625, + 0.099609375, + 0.51953125, + 0.38085938, + -0.09667969, + -0.100097656, + -0.5, + -1.3125, + -0.18066406, + -0.099121094, + 0.26171875, + -0.14453125, + -0.546875, + 0.17578125, + 0.484375, + 0.765625, + 0.45703125, + 0.2734375, + 0.0028076172, + 0.17089844, + -0.32421875, + -0.37695312, + 0.30664062, + -0.48046875, + 0.07128906, + 0.031982422, + -0.31054688, + -0.055419922, + -0.29296875, + 0.3359375, + -0.296875, + 0.47851562, + -0.05126953, + 0.18457031, + -0.01953125, + -0.35742188, + 0.017944336, + -0.25, + 0.10595703, + 0.17382812, + 
-0.73828125, + 0.36914062, + -0.15234375, + -0.8125, + 0.17382812, + 0.048095703, + 0.5625, + -0.33789062, + 0.023071289, + -0.21972656, + 0.16015625, + 0.032958984, + -1.1171875, + -0.984375, + 0.83984375, + 0.009033203, + -0.042236328, + -0.46484375, + -0.08203125, + 0.44726562, + -0.765625, + -0.3984375, + -0.40820312, + -0.234375, + 0.044189453, + 0.119628906, + -0.7578125, + -0.55078125, + -0.4453125, + 0.7578125, + 0.34960938, + 0.96484375, + 0.35742188, + 0.36914062, + -0.35351562, + -0.36132812, + 1.109375, + 0.5859375, + 0.85546875, + -0.10644531, + -0.6953125, + -0.0066833496, + 0.042236328, + -0.06689453, + 0.36914062, + 0.9765625, + -0.3046875, + 0.59765625, + -0.6640625, + 0.21484375, + -0.07128906, + 1.1328125, + -0.51953125, + 0.86328125, + -0.11328125, + 0.15722656, + -0.36328125, + -0.04638672, + 1.4375, + 0.18457031, + -0.18359375, + 0.10595703, + -0.49023438, + -0.07324219, + -0.73046875, + -0.119140625, + 0.021118164, + 0.4921875, + -0.46875, + 0.28710938, + 0.3359375, + 0.11767578, + -0.2109375, + -0.14550781, + 0.39648438, + -0.27734375, + 0.48046875, + 0.12988281, + 0.45507812, + -0.375, + -0.84765625, + 0.25585938, + -0.36523438, + 0.8046875, + 0.42382812, + -0.24511719, + 0.54296875, + 0.71875, + 0.010009766, + -0.04296875, + 0.083984375, + -0.52734375, + 0.13964844, + -0.27539062, + -0.30273438, + 1.1484375, + -0.515625, + -0.19335938, + 0.58984375, + 0.049072266, + 0.703125, + -0.04272461, + 0.5078125, + 0.34960938, + -0.3359375, + -0.47460938, + 0.049316406, + 0.36523438, + 0.7578125, + -0.022827148, + -0.71484375, + 0.21972656, + 0.09716797, + -0.203125, + -0.36914062, + 1.34375, + 0.34179688, + 0.46679688, + 1.078125, + 0.26171875, + 0.41992188, + 0.22363281, + -0.515625, + -0.5703125, + 0.13378906, + 0.26757812, + -0.22558594, + -0.5234375, + 0.06689453, + 0.08251953, + -0.625, + 0.16796875, + 0.43164062, + -0.55859375, + 0.28125, + 0.078125, + 0.6328125, + 0.23242188, + -0.064941406, + -0.004486084, + -0.20703125, + 0.2734375, + 
0.453125, + -0.734375, + 0.04272461, + 0.36132812, + -0.19628906, + -0.12402344, + 1.3515625, + 0.25585938, + 0.4921875, + -0.29296875, + -0.58984375, + 0.021240234, + -0.044677734, + 0.7578125, + -0.7890625, + 0.10253906, + -0.15820312, + -0.5078125, + -0.39453125, + -0.453125, + 0.35742188, + 0.921875, + 0.44335938, + -0.49804688, + 0.44335938, + 0.31445312, + 0.58984375, + -1.0078125, + -0.22460938, + 0.24121094, + 0.87890625, + 0.66015625, + -0.390625, + -0.05053711, + 0.059570312, + 0.36132812, + -0.00038719177, + -0.017089844, + 0.62890625, + 0.203125, + 0.17480469, + 0.025512695, + 0.47460938, + 0.3125, + 1.140625, + 0.32421875, + -0.057861328, + 0.36914062, + -0.7265625, + -0.51953125, + 0.26953125, + 0.42773438, + 0.064453125, + 0.6328125, + 0.27148438, + -0.11767578, + 0.66796875, + -0.38671875, + 0.5234375, + -0.59375, + 0.5078125, + 0.008239746, + -0.34179688, + -0.27539062, + 0.5234375, + 1.296875, + 0.29492188, + -0.010986328, + -0.41210938, + 0.59375, + 0.061767578, + -0.33398438, + -2.03125, + 0.87890625, + -0.010620117, + 0.53125, + 0.14257812, + -0.515625, + -1.03125, + 0.578125, + 0.1875, + 0.44335938, + -0.33203125, + -0.36328125, + -0.3203125, + 0.29296875, + -0.8203125, + 0.41015625, + -0.48242188, + 0.66015625, + 0.5625, + -0.16503906, + -0.54296875, + -0.38085938, + 0.26171875, + 0.62109375, + 0.29101562, + -0.31054688, + 0.23730469, + -0.8515625, + 0.5234375, + 0.15332031, + 0.52734375, + -0.079589844, + -0.080566406, + -0.15527344, + -0.022827148, + 0.030517578, + -0.1640625, + -0.421875, + 0.09716797, + 0.03930664, + -0.055908203, + -0.546875, + -0.47851562, + 0.091796875, + 0.32226562, + -0.94140625, + -0.04638672, + -1.203125, + -0.39648438, + 0.45507812, + 0.296875, + -0.45703125, + 0.37890625, + -0.122558594, + 0.28320312, + -0.01965332, + -0.11669922, + -0.34570312, + -0.53515625, + -0.091308594, + -0.9375, + -0.32617188, + 0.095214844, + -0.4765625, + 0.37890625, + -0.859375, + 1.1015625, + -0.08935547, + 0.46484375, + -0.19238281, 
+ 0.7109375, + 0.040039062, + -0.5390625, + 0.22363281, + -0.70703125, + 0.4921875, + -0.119140625, + -0.26757812, + -0.08496094, + 0.0859375, + -0.00390625, + -0.013366699, + -0.03955078, + 0.07421875, + -0.13085938, + 0.29101562, + -0.12109375, + 0.45703125, + 0.021728516, + 0.38671875, + -0.3671875, + -0.52734375, + -0.115722656, + 0.125, + 0.5703125, + -1.234375, + 0.06298828, + -0.55859375, + 0.60546875, + 0.8125, + -0.0032958984, + -0.068359375, + -0.21191406, + 0.56640625, + 0.17285156, + -0.3515625, + 0.36328125, + -0.99609375, + 0.43554688, + -0.1015625, + 0.07080078, + -0.66796875, + 1.359375, + 0.41601562, + 0.15917969, + 0.17773438, + -0.28710938, + 0.021850586, + -0.46289062, + 0.17578125, + -0.03955078, + -0.026855469, + 0.5078125, + -0.65625, + 0.0012512207, + 0.044433594, + -0.18652344, + 0.4921875, + -0.75390625, + 0.0072021484, + 0.4375, + -0.31445312, + 0.20214844, + 0.15039062, + -0.63671875, + -0.296875, + -0.375, + -0.027709961, + 0.013427734, + 0.17089844, + 0.89453125, + 0.11621094, + -0.43945312, + -0.30859375, + 0.02709961, + 0.23242188, + -0.64453125, + -0.859375, + 0.22167969, + -0.023071289, + -0.052734375, + 0.3671875, + -0.18359375, + 0.81640625, + -0.11816406, + 0.028320312, + 0.19042969, + 0.012817383, + -0.43164062, + 0.55859375, + -0.27929688, + 0.14257812, + -0.140625, + -0.048583984, + -0.014526367, + 0.35742188, + 0.22753906, + 0.13183594, + 0.04638672, + 0.03930664, + -0.29296875, + -0.2109375, + -0.16308594, + -0.48046875, + -0.13378906, + -0.39257812, + 0.29296875, + -0.047851562, + -0.5546875, + 0.08300781, + -0.14941406, + -0.07080078, + 0.12451172, + 0.1953125, + -0.51171875, + -0.048095703, + 0.1953125, + -0.37695312, + 0.46875, + -0.084472656, + 0.19042969, + -0.39453125, + 0.69921875, + -0.0065307617, + 0.25390625, + -0.16992188, + -0.5078125, + 0.016845703, + 0.27929688, + -0.22070312, + 0.671875, + 0.18652344, + 0.25, + -0.046875, + -0.012023926, + -0.36523438, + 0.36523438, + -0.11279297, + 0.421875, + 0.079589844, 
+ -0.100097656, + 0.37304688, + 0.29882812, + -0.10546875, + -0.36523438, + 0.040039062, + 0.546875, + 0.12890625, + -0.06542969, + -0.38085938, + -0.35742188, + -0.6484375, + -0.28515625, + 0.0107421875, + -0.055664062, + 0.45703125, + 0.33984375, + 0.26367188, + -0.23144531, + 0.012878418, + -0.875, + 0.11035156, + 0.33984375, + 0.203125, + 0.38867188, + 0.24902344, + -0.37304688, + -0.98046875, + -0.122558594, + -0.17871094, + -0.09277344, + 0.1796875, + 0.4453125, + -0.66796875, + 0.78515625, + 0.12988281, + 0.35546875, + 0.44140625, + 0.58984375, + 0.29492188, + 0.7734375, + -0.21972656, + -0.40234375, + -0.22265625, + 0.18359375, + 0.54296875, + 0.17382812, + 0.59375, + -0.390625, + -0.92578125, + -0.017456055, + -0.25, + 0.73828125, + 0.7578125, + -0.3828125, + -0.25976562, + 0.049072266, + 0.046875, + -0.3515625, + 0.30078125, + -1.03125, + -0.48828125, + 0.0017929077, + -0.26171875, + 0.20214844, + 0.29882812, + 0.064941406, + 0.21484375, + -0.55078125, + -0.021362305, + 0.12988281, + 0.27148438, + 0.38867188, + -0.19726562, + -0.55078125, + 0.1640625, + 0.32226562, + -0.72265625, + 0.36132812, + 1.21875, + -0.22070312, + -0.32421875, + -0.29882812, + 0.0024414062, + 0.19921875, + 0.734375, + 0.16210938, + 0.17871094, + -0.19140625, + 0.38476562, + -0.06591797, + -0.47070312, + -0.040039062, + -0.33007812, + -0.07910156, + -0.2890625, + 0.00970459, + 0.12695312, + -0.12060547, + -0.18847656, + 1.015625, + -0.032958984, + 0.12451172, + -0.38476562, + 0.063964844, + 1.0859375, + 0.067871094, + -0.24511719, + 0.125, + 0.10546875, + -0.22460938, + -0.29101562, + 0.24414062, + -0.017944336, + -0.15625, + -0.60546875, + -0.25195312, + -0.46875, + 0.80859375, + -0.34960938, + 0.42382812, + 0.796875, + 0.296875, + -0.067871094, + 0.39453125, + 0.07470703, + 0.033935547, + 0.24414062, + 0.32617188, + 0.023925781, + 0.73046875, + 0.2109375, + -0.43164062, + 0.14453125, + 0.63671875, + 0.21972656, + -0.1875, + -0.18066406, + -0.22167969, + -1.3359375, + 0.52734375, + 
-0.40625, + -0.12988281, + 0.17480469, + -0.18066406, + 0.58984375, + -0.32421875, + -0.13476562, + 0.39257812, + -0.19238281, + 0.068359375, + 0.7265625, + -0.7109375, + -0.125, + 0.328125, + 0.34179688, + -0.48828125, + -0.10058594, + -0.83984375, + 0.30273438, + 0.008239746, + -1.390625, + 0.171875, + 0.34960938, + 0.44921875, + 0.22167969, + 0.60546875, + -0.36914062, + -0.028808594, + -0.19921875, + 0.6875, + 0.52734375, + -0.07421875, + 0.35546875, + 0.546875, + 0.08691406, + 0.23339844, + -0.984375, + -0.20507812, + 0.08544922, + 0.453125, + -0.07421875, + -0.953125, + 0.74609375, + -0.796875, + 0.47851562, + 0.81640625, + -0.44921875, + -0.33398438, + -0.54296875, + 0.46484375, + -0.390625, + -0.24121094, + -0.0115356445, + 1.1328125, + 1.0390625, + 0.6484375, + 0.35742188, + -0.29492188, + -0.0007095337, + -0.060302734, + 0.21777344, + 0.15136719, + -0.6171875, + 0.11328125, + -0.025878906, + 0.19238281, + 0.140625, + 0.171875, + 0.25195312, + 0.10546875, + 0.0008354187, + -0.13476562, + -0.26953125, + 0.025024414, + -0.28320312, + -0.107910156, + 1.015625, + 0.05493164, + -0.12988281, + 0.30859375, + 0.22558594, + -0.60546875, + 0.11328125, + -1.203125, + 0.6484375, + 0.087402344, + 0.32226562, + 0.63671875, + -0.07714844, + -1.390625, + -0.71875, + -0.34179688, + -0.10546875, + -0.37304688, + -0.09863281, + -0.41210938, + -0.14941406, + 0.41210938, + -0.20898438, + 0.18261719, + 0.67578125, + 0.41601562, + 0.32617188, + 0.2421875, + -0.14257812, + -0.6796875, + 0.01953125, + 0.34179688, + 0.20800781, + -0.123046875, + 0.087402344, + 0.85546875, + 0.33984375, + 0.33203125, + -0.68359375, + 0.44921875, + 0.50390625, + 0.083496094, + 0.10888672, + -0.09863281, + 0.55078125, + 0.09765625, + -0.50390625, + 0.13378906, + -0.29882812, + 0.030761719, + -0.64453125, + 0.22949219, + 0.43945312, + 0.16503906, + 0.10888672, + -0.12792969, + -0.039794922, + -0.111328125, + -0.35742188, + 0.053222656, + -0.78125, + -0.4375, + 0.359375, + -0.88671875, + -0.21972656, + 
-0.053710938, + 0.91796875, + -0.10644531, + 0.55859375, + -0.7734375, + 0.5078125, + 0.46484375, + 0.32226562, + 0.16796875, + -0.28515625, + 0.045410156, + -0.45117188, + 0.38867188, + -0.33398438, + -0.5234375, + 0.296875, + 0.6015625, + 0.3515625, + -0.734375, + 0.3984375, + -0.08251953, + 0.359375, + -0.28515625, + -0.88671875, + 0.0051879883, + 0.045166016, + -0.7421875, + -0.36523438, + 0.140625, + 0.18066406, + -0.171875, + -0.15625, + -0.53515625, + 0.2421875, + -0.19140625, + -0.18066406, + 0.25390625, + 0.6875, + -0.01965332, + -0.33203125, + 0.29492188, + 0.107421875, + -0.048339844, + -0.82421875, + 0.52734375, + 0.78125, + 0.8203125, + -0.90625, + 0.765625, + 0.0390625, + 0.045410156, + 0.26367188, + -0.14355469, + -0.26367188, + 0.390625, + -0.10888672, + 0.33007812, + -0.5625, + 0.08105469, + -0.13769531, + 0.8515625, + -0.14453125, + 0.77734375, + -0.48046875, + -0.3515625, + -0.25390625, + -0.09277344, + 0.23925781, + -0.022338867, + -0.45898438, + 0.36132812, + -0.23828125, + 0.265625, + -0.48632812, + -0.46875, + -0.75390625, + 1.3125, + 0.78125, + -0.63671875, + -1.21875, + 0.5078125, + -0.27734375, + -0.118652344, + 0.041992188, + -0.14648438, + -0.8046875, + 0.21679688, + -0.79296875, + 0.28320312, + -0.09667969, + 0.42773438, + 0.49414062, + 0.44726562, + 0.21972656, + -0.02746582, + -0.03540039, + -0.14941406, + -0.515625, + -0.27929688, + 0.9609375, + -0.007598877, + 0.34765625, + -0.060546875, + -0.44726562, + 0.7421875, + 0.15332031, + 0.45117188, + -0.4921875, + 0.07080078, + 0.5625, + 0.3984375, + -0.20019531, + 0.014892578, + 0.63671875, + -0.0071411133, + 0.016357422, + 1.0625, + 0.049316406, + 0.18066406, + 0.09814453, + -0.52734375, + -0.359375, + -0.072265625, + -0.41992188, + 0.39648438, + 0.38671875, + -0.30273438, + -0.056640625, + -0.640625, + -0.44921875, + 0.49414062, + 0.29101562, + 0.49609375, + 0.40429688, + -0.10205078, + 0.49414062, + -0.28125, + -0.12695312, + -0.0022735596, + -0.37304688, + 0.122558594, + 0.07519531, 
+ -0.12597656, + -0.38085938, + -0.19824219, + -0.40039062, + 0.56640625, + -1.140625, + -0.515625, + -0.17578125, + -0.765625, + -0.43945312, + 0.3359375, + -0.24707031, + 0.32617188, + -0.45117188, + -0.37109375, + 0.45117188, + -0.27539062, + -0.38867188, + 0.09082031, + 0.17675781, + 0.49414062, + 0.19921875, + 0.17480469, + 0.8515625, + -0.23046875, + -0.234375, + -0.28515625, + 0.10253906, + 0.29101562, + -0.3359375, + -0.203125, + 0.6484375, + 0.11767578, + -0.20214844, + -0.42382812, + 0.26367188, + 0.6328125, + 0.0059509277, + 0.08691406, + -1.5625, + -0.43554688, + 0.17675781, + 0.091796875, + -0.5234375, + -0.09863281, + 0.20605469, + 0.16601562, + -0.578125, + 0.017700195, + 0.41015625, + 1.03125, + -0.55078125, + 0.21289062, + -0.35351562, + 0.24316406, + -0.123535156, + 0.11035156, + -0.48242188, + -0.34179688, + 0.45117188, + 0.3125, + -0.071777344, + 0.12792969, + 0.55859375, + 0.063964844, + -0.21191406, + 0.01965332, + -1.359375, + -0.21582031, + -0.019042969, + 0.16308594, + -0.3671875, + -0.40625, + -1.0234375, + -0.21289062, + 0.24023438, + -0.28125, + 0.26953125, + -0.14550781, + -0.087890625, + 0.16113281, + -0.49804688, + -0.17675781, + -0.890625, + 0.27929688, + 0.484375, + 0.27148438, + 0.11816406, + 0.83984375, + 0.029052734, + -0.890625, + 0.66796875, + 0.78515625, + -0.953125, + 0.49414062, + -0.546875, + 0.106933594, + -0.08251953, + 0.2890625, + -0.1484375, + -0.85546875, + 0.32421875, + -0.0040893555, + -0.16601562, + -0.16699219, + 0.24414062, + -0.5078125, + 0.25390625, + -0.10253906, + 0.15625, + 0.140625, + -0.27539062, + -0.546875, + -0.5546875, + -0.71875, + 0.37304688, + 0.060058594, + -0.076171875, + 0.44921875, + 0.06933594, + -0.28710938, + -0.22949219, + 0.17578125, + 0.09814453, + 0.4765625, + -0.95703125, + -0.03540039, + 0.21289062, + -0.7578125, + -0.07373047, + 0.10546875, + 0.07128906, + 0.76171875, + 0.4296875, + -0.09375, + 0.27539062, + -0.55078125, + 0.29882812, + -0.42382812, + 0.32617188, + -0.39648438, + 
0.12451172, + 0.16503906, + -0.22460938, + -0.65625, + -0.022094727, + 0.61328125, + -0.024780273, + 0.62109375, + -0.033447266, + 0.515625, + 0.12890625, + -0.21875, + -0.08642578, + 0.49804688, + -0.2265625, + -0.29296875, + 0.19238281, + 0.3515625, + -1.265625, + 0.57421875, + 0.20117188, + -0.28320312, + 0.1953125, + -0.30664062, + 0.2265625, + -0.11230469, + 0.83984375, + 0.111328125, + 0.265625, + 0.71484375, + -0.625, + 0.38867188, + 0.47070312, + -0.32617188, + -0.171875, + 1.0078125, + 0.19726562, + -0.118652344, + 0.63671875, + -0.068359375, + -0.25585938, + 0.4140625, + -0.29296875, + 0.21386719, + -0.064453125, + 0.15820312, + -0.89453125, + -0.16308594, + 0.48046875, + 0.14648438, + -0.5703125, + 0.84765625, + -0.19042969, + 0.03515625, + 0.42578125, + -0.27539062, + -0.5390625, + 0.95703125, + 0.2734375, + 0.16699219, + -0.328125, + 0.11279297, + 0.003250122, + 0.47265625, + -0.31640625, + 0.546875, + 0.55859375, + 0.06933594, + -0.61328125, + -0.16210938, + -0.375, + 0.100097656, + -0.088378906, + 0.12695312, + 0.079589844, + 0.123535156, + -1.0078125, + 0.6875, + 0.022949219, + -0.40039062, + -0.09863281, + 0.29101562, + -1.2890625, + -0.20996094, + 0.36328125, + -0.3515625, + 0.7890625, + 0.12207031, + 0.48046875, + -0.13671875, + -0.041015625, + 0.19824219, + 0.19921875, + 0.01171875, + -0.37695312, + -0.62890625, + 0.9375, + -0.671875, + 0.24609375, + 0.6484375, + -0.29101562, + 0.076171875, + 0.62109375, + -0.5546875, + 0.36523438, + 0.75390625, + -0.19140625, + -0.875, + -0.8203125, + -0.24414062, + -0.625, + 0.1796875, + -0.40039062, + 0.25390625, + -0.14550781, + -0.21679688, + -0.828125, + 0.3359375, + 0.43554688, + 0.55078125, + -0.44921875, + -0.28710938, + 0.24023438, + 0.18066406, + -0.6953125, + 0.020385742, + -0.11376953, + 0.13867188, + -0.92578125, + 0.33398438, + -0.328125, + 0.78125, + -0.45507812, + -0.07470703, + 0.34179688, + 0.07080078, + 0.76171875, + 0.37890625, + -0.10644531, + 0.90234375, + -0.21875, + -0.15917969, + 
-0.36132812, + 0.2109375, + -0.45703125, + -0.76953125, + 0.21289062, + 0.26367188, + 0.49804688, + 0.35742188, + -0.20019531, + 0.31054688, + 0.34179688, + 0.17089844, + -0.15429688, + 0.39648438, + -0.5859375, + 0.20996094, + -0.40039062, + 0.5703125, + -0.515625, + 0.5234375, + 0.049560547, + 0.328125, + 0.24804688, + 0.42578125, + 0.609375, + 0.19238281, + 0.27929688, + 0.19335938, + 0.78125, + -0.9921875, + 0.23925781, + -1.3828125, + -0.22949219, + -0.578125, + -0.13964844, + -0.17382812, + -0.011169434, + 0.26171875, + -0.73046875, + -1.4375, + 0.6953125, + -0.7421875, + 0.052246094, + 0.12207031, + 1.3046875, + 0.38867188, + 0.040283203, + -0.546875, + -0.0021514893, + 0.18457031, + -0.5546875, + -0.51171875, + -0.16308594, + -0.104003906, + -0.38867188, + -0.20996094, + -0.8984375, + 0.6015625, + -0.30078125, + -0.13769531, + 0.16113281, + 0.58203125, + -0.23730469, + -0.125, + -1.0234375, + 0.875, + -0.7109375, + 0.29101562, + 0.09667969, + -0.3203125, + -0.48046875, + 0.37890625, + 0.734375, + -0.28710938, + -0.29882812, + -0.05493164, + 0.34765625, + -0.84375, + 0.65625, + 0.578125, + -0.20019531, + 0.13769531, + 0.10058594, + -0.37109375, + 0.36523438, + -0.22167969, + 0.72265625, + ], + "inputTextTokenCount": 6, + }, + ], } MODEL_PATH_RE = re.compile(r"/model/([^/]+)/invoke") diff --git a/tests/mlmodel_bedrock/_test_embeddings.py b/tests/mlmodel_bedrock/_test_embeddings.py new file mode 100644 index 0000000000..ab724d93c7 --- /dev/null +++ b/tests/mlmodel_bedrock/_test_embeddings.py @@ -0,0 +1,51 @@ +embedding_payload_templates = { + "amazon.titan-embed-text-v1": '{ "inputText": "%s" }', + "amazon.titan-embed-g1-text-02": '{ "inputText": "%s" }', +} + +embedding_expected_events = { + "amazon.titan-embed-text-v1": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "transaction_id": None, + "span_id": "span-id", + "trace_id": "trace-id", + "input": "This is an 
embedding test.", + "api_key_last_four_digits": "CRET", + "duration": None, # Response time varies each test run + "response.model": "amazon.titan-embed-text-v1", + "request.model": "amazon.titan-embed-text-v1", + "request_id": "75f1d3fe-6cde-4cf5-bdaf-7101f746ccfe", + "response.usage.total_tokens": 6, + "response.usage.prompt_tokens": 6, + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], + "amazon.titan-embed-g1-text-02": [ + ( + {"type": "LlmEmbedding"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "transaction_id": None, + "span_id": "span-id", + "trace_id": "trace-id", + "input": "This is an embedding test.", + "api_key_last_four_digits": "CRET", + "duration": None, # Response time varies each test run + "response.model": "amazon.titan-embed-g1-text-02", + "request.model": "amazon.titan-embed-g1-text-02", + "request_id": "f7e78265-6b7c-4b3a-b750-0c1d00347258", + "response.usage.total_tokens": 6, + "response.usage.prompt_tokens": 6, + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ] +} diff --git a/tests/mlmodel_bedrock/conftest.py b/tests/mlmodel_bedrock/conftest.py index b0aa70c575..a313398ed4 100644 --- a/tests/mlmodel_bedrock/conftest.py +++ b/tests/mlmodel_bedrock/conftest.py @@ -25,6 +25,8 @@ collector_available_fixture, ) +from newrelic.api.time_trace import current_trace +from newrelic.api.transaction import current_transaction from newrelic.common.object_wrapper import wrap_function_wrapper _default_settings = { @@ -133,3 +135,16 @@ def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwar def bind_make_api_call_params(operation_name, api_params): return api_params + + +@pytest.fixture(scope="session") +def set_trace_info(): + def _set_trace_info(): + txn = current_transaction() + if txn: + txn._trace_id = "trace-id" + trace = current_trace() + if trace: + trace.guid = "span-id" + + return _set_trace_info diff --git 
a/tests/mlmodel_bedrock/test_chat_completion.py b/tests/mlmodel_bedrock/test_chat_completion.py index 50f851f92d..7e6c04899d 100644 --- a/tests/mlmodel_bedrock/test_chat_completion.py +++ b/tests/mlmodel_bedrock/test_chat_completion.py @@ -33,22 +33,13 @@ from newrelic.api.transaction import add_custom_attribute, current_transaction -def set_trace_info(): - txn = current_transaction() - if txn: - txn._trace_id = "trace-id" - trace = current_trace() - if trace: - trace.guid = "span-id" - - @pytest.fixture(scope="session", params=[False, True], ids=["Bytes", "Stream"]) def is_file_payload(request): return request.param @pytest.fixture( - scope="session", + scope="module", params=[ "amazon.titan-text-express-v1", "ai21.j2-mid-v1", @@ -60,7 +51,7 @@ def model_id(request): return request.param -@pytest.fixture(scope="session") +@pytest.fixture(scope="module") def exercise_model(bedrock_server, model_id, is_file_payload): payload_template = chat_completion_payload_templates[model_id] @@ -81,12 +72,12 @@ def _exercise_model(prompt, temperature=0.7, max_tokens=100): return _exercise_model -@pytest.fixture(scope="session") +@pytest.fixture(scope="module") def expected_events(model_id): return chat_completion_expected_events[model_id] -@pytest.fixture(scope="session") +@pytest.fixture(scope="module") def expected_events_no_convo_id(model_id): events = copy.deepcopy(chat_completion_expected_events[model_id]) for event in events: @@ -98,11 +89,18 @@ def expected_events_no_convo_id(model_id): @reset_core_stats_engine() -def test_bedrock_chat_completion_in_txn(exercise_model, expected_events): +def test_bedrock_chat_completion_in_txn_with_convo_id(set_trace_info, exercise_model, expected_events): @validate_ml_events(expected_events) # One summary event, one user message, and one response message from the assistant @validate_ml_event_count(count=3) - @background_task() + # @validate_transaction_metrics( + # name="test_bedrock_chat_completion_in_txn_with_convo_id", + # 
custom_metrics=[ + # ("Python/ML/OpenAI/%s" % openai.__version__, 1), + # ], + # background_task=True, + # ) + @background_task(name="test_bedrock_chat_completion_in_txn_with_convo_id") def _test(): set_trace_info() add_custom_attribute("conversation_id", "my-awesome-id") @@ -112,11 +110,18 @@ def _test(): @reset_core_stats_engine() -def test_bedrock_chat_completion_in_txn_no_convo_id(exercise_model, expected_events_no_convo_id): +def test_bedrock_chat_completion_in_txn_no_convo_id(set_trace_info, exercise_model, expected_events_no_convo_id): @validate_ml_events(expected_events_no_convo_id) # One summary event, one user message, and one response message from the assistant @validate_ml_event_count(count=3) - @background_task() + # @validate_transaction_metrics( + # name="test_bedrock_chat_completion_in_txn_no_convo_id", + # custom_metrics=[ + # ("Python/ML/OpenAI/%s" % openai.__version__, 1), + # ], + # background_task=True, + # ) + @background_task(name="test_bedrock_chat_completion_in_txn_no_convo_id") def _test(): set_trace_info() exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) @@ -126,7 +131,7 @@ def _test(): @reset_core_stats_engine() @validate_ml_event_count(count=0) -def test_bedrock_chat_completion_outside_txn(exercise_model): +def test_bedrock_chat_completion_outside_txn(set_trace_info, exercise_model): set_trace_info() add_custom_attribute("conversation_id", "my-awesome-id") exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) @@ -138,6 +143,14 @@ def test_bedrock_chat_completion_outside_txn(exercise_model): @override_application_settings(disabled_ml_settings) @reset_core_stats_engine() @validate_ml_event_count(count=0) -def test_bedrock_chat_completion_disabled_settings(exercise_model): +# @validate_transaction_metrics( +# name="test_bedrock_chat_completion_disabled_settings", +# custom_metrics=[ +# ("Python/ML/OpenAI/%s" % openai.__version__, 1), +# ], +# 
background_task=True, +# ) +@background_task(name="test_bedrock_chat_completion_disabled_settings") +def test_bedrock_chat_completion_disabled_settings(set_trace_info, exercise_model): set_trace_info() exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) diff --git a/tests/mlmodel_bedrock/test_embeddings.py b/tests/mlmodel_bedrock/test_embeddings.py new file mode 100644 index 0000000000..a419953ab5 --- /dev/null +++ b/tests/mlmodel_bedrock/test_embeddings.py @@ -0,0 +1,117 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import json +from io import BytesIO + +import pytest +from testing_support.fixtures import ( # override_application_settings, + override_application_settings, + reset_core_stats_engine, +) +from testing_support.validators.validate_ml_event_count import validate_ml_event_count +from testing_support.validators.validate_ml_events import validate_ml_events +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task + +from _test_embeddings import embedding_expected_events, embedding_payload_templates + +disabled_ml_insights_settings = {"ml_insights_events.enabled": False} + + +@pytest.fixture(scope="session", params=[False, True], ids=["Bytes", "Stream"]) +def is_file_payload(request): + return request.param + + +@pytest.fixture( + scope="module", + params=[ + "amazon.titan-embed-text-v1", + "amazon.titan-embed-g1-text-02", + ], +) +def model_id(request): + return request.param + + +@pytest.fixture(scope="module") +def exercise_model(bedrock_server, model_id, is_file_payload): + payload_template = embedding_payload_templates[model_id] + + def _exercise_model(prompt, temperature=0.7, max_tokens=100): + body = (payload_template % prompt).encode("utf-8") + if is_file_payload: + body = BytesIO(body) + + response = bedrock_server.invoke_model( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + response_body = json.loads(response.get("body").read()) + assert response_body + + return _exercise_model + + +@pytest.fixture(scope="module") +def expected_events(model_id): + return embedding_expected_events[model_id] + + +@reset_core_stats_engine() +def test_bedrock_embedding(set_trace_info, exercise_model, expected_events): + @validate_ml_events(expected_events) + @validate_ml_event_count(count=1) + # @validate_transaction_metrics( + # name="test_bedrock_embedding", + # custom_metrics=[ + # 
("Python/ML/OpenAI/%s" % openai.__version__, 1), + # ], + # background_task=True, + # ) + @background_task(name="test_bedrock_embedding") + def _test(): + set_trace_info() + exercise_model(prompt="This is an embedding test.") + + _test() + + +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +def test_bedrock_embedding_outside_txn(exercise_model): + exercise_model(prompt="This is an embedding test.") + + +@override_application_settings(disabled_ml_insights_settings) +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +# @validate_transaction_metrics( +# name="test_embeddings:test_bedrock_embedding_disabled_settings", +# custom_metrics=[ +# ("Python/ML/OpenAI/%s" % openai.__version__, 1), +# ], +# background_task=True, +# ) +@background_task() +def test_bedrock_embedding_disabled_settings(set_trace_info, exercise_model): + set_trace_info() + exercise_model(prompt="This is an embedding test.") From 1803b646be196cd76928ba8cb7a34e03202c0922 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Mon, 6 Nov 2023 11:37:09 -0800 Subject: [PATCH 05/16] Add support for bedrock claude (#960) Co-authored-by: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> --- newrelic/hooks/external_botocore.py | 19 ++++++ .../_mock_external_bedrock_server.py | 8 +++ .../mlmodel_bedrock/_test_chat_completion.py | 63 +++++++++++++++++++ tests/mlmodel_bedrock/test_chat_completion.py | 2 +- 4 files changed, 91 insertions(+), 1 deletion(-) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 91011f2a8b..c99293a63f 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -154,6 +154,24 @@ def extract_bedrock_ai21_j2_model(request_body, response_body): return message_list, chat_completion_summary_dict +def extract_bedrock_claude_model(request_body, response_body): + response_body = json.loads(response_body) + request_body = json.loads(request_body) + + message_list = [ + {"role": "user", 
"content": request_body.get("prompt", "")}, + {"role": "assistant", "content": response_body.get("completion", "")}, + ] + + chat_completion_summary_dict = { + "request.max_tokens": request_body.get("max_tokens_to_sample", ""), + "request.temperature": request_body.get("temperature", ""), + "response.choices.finish_reason": response_body.get("stop_reason", ""), + "response.number_of_messages": len(message_list), + } + return message_list, chat_completion_summary_dict + + def extract_bedrock_cohere_model(request_body, response_body): response_body = json.loads(response_body) request_body = json.loads(request_body) @@ -178,6 +196,7 @@ def extract_bedrock_cohere_model(request_body, response_body): ("amazon.titan", extract_bedrock_titan_text_model), ("ai21.j2", extract_bedrock_ai21_j2_model), ("cohere", extract_bedrock_cohere_model), + ("anthropic.claude", extract_bedrock_claude_model), ] diff --git a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py index 96598a02a5..c9149e3f1b 100644 --- a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py +++ b/tests/mlmodel_bedrock/_mock_external_bedrock_server.py @@ -42,6 +42,14 @@ ], }, ], + "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:": [ + {"content-type": "application/json", "x-amzn-requestid": "f354b9a7-9eac-4f50-a8d7-7d5d23566176"}, + { + "completion": " Here are the step-by-step workings:\n1) 212 degrees Fahrenheit \n2) To convert to Celsius, use the formula: C = (F - 32) * 5/9\n3) Plug in the values: C = (212 - 32) * 5/9 = 100 * 5/9 = 100 degrees Celsius\n\nSo, 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", + "stop_reason": "stop_sequence", + "stop": "\n\nHuman:", + }, + ], "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ {"content-type": "application/json", "x-amzn-requestid": "c5188fb5-dc58-4cbe-948d-af173c69ce0d"}, { diff --git a/tests/mlmodel_bedrock/_test_chat_completion.py b/tests/mlmodel_bedrock/_test_chat_completion.py index 17fa8549cb..3f1f297d4a 100644 --- a/tests/mlmodel_bedrock/_test_chat_completion.py +++ b/tests/mlmodel_bedrock/_test_chat_completion.py @@ -1,6 +1,7 @@ chat_completion_payload_templates = { "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', + "anthropic.claude-instant-v1": '{"prompt": "Human: %s Assistant:", "temperature": %f, "max_tokens_to_sample": %d}', "cohere.command-text-v14": '{"prompt": "%s", "temperature": %f, "max_tokens": %d}', } @@ -133,6 +134,68 @@ }, ), ], + "anthropic.claude-instant-v1": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "transaction_id": None, + "span_id": "span-id", + "trace_id": "trace-id", + "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", + "api_key_last_four_digits": "CRET", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-instant-v1", + "response.model": "anthropic.claude-instant-v1", + "request.temperature": 0.7, + 
"request.max_tokens": 100, + "response.choices.finish_reason": "stop_sequence", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": "Human: What is 212 degrees Fahrenheit converted to Celsius? Assistant:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "appName": "Python Agent Test (mlmodel_bedrock)", + "conversation_id": "my-awesome-id", + "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", + "span_id": "span-id", + "trace_id": "trace-id", + "transaction_id": None, + "content": " Here are the step-by-step workings:\n1) 212 degrees Fahrenheit \n2) To convert to Celsius, use the formula: C = (F - 32) * 5/9\n3) Plug in the values: C = (212 - 32) * 5/9 = 100 * 5/9 = 100 degrees Celsius\n\nSo, 212 degrees Fahrenheit converted to Celsius is", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-instant-v1", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], "cohere.command-text-v14": [ ( {"type": "LlmChatCompletionSummary"}, diff --git a/tests/mlmodel_bedrock/test_chat_completion.py b/tests/mlmodel_bedrock/test_chat_completion.py index 7e6c04899d..4da60eaee4 100644 --- a/tests/mlmodel_bedrock/test_chat_completion.py +++ b/tests/mlmodel_bedrock/test_chat_completion.py @@ -43,7 +43,7 @@ def is_file_payload(request): params=[ "amazon.titan-text-express-v1", "ai21.j2-mid-v1", - # ("anthropic.claude-instant-v1", 
'{"prompt": "Human: {prompt}\n\nAssistant:", "max_tokens_to_sample": {max_tokens:d}}'), + "anthropic.claude-instant-v1", "cohere.command-text-v14", ], ) From 277d0a53afc6acd7f56099a307eb487fae580d79 Mon Sep 17 00:00:00 2001 From: Timothy Pansino <11214426+TimPansino@users.noreply.github.com> Date: Mon, 6 Nov 2023 13:15:02 -0800 Subject: [PATCH 06/16] Combine Botocore Tests (#959) * Initial file migration * Enable DT on all span tests * Add pytest skip for older botocore versions * Fixup: app name merge conflict --------- Co-authored-by: Hannah Stepanek --- tests/external_boto3/conftest.py | 30 ---- .../_mock_external_bedrock_server.py | 0 .../_test_bedrock_chat_completion.py} | 24 +-- .../_test_bedrock_embeddings.py} | 4 +- tests/external_botocore/conftest.py | 147 +++++++++++++++-- .../test_bedrock_chat_completion.py} | 2 +- .../test_bedrock_embeddings.py} | 2 +- .../test_boto3_iam.py | 4 +- .../test_boto3_s3.py | 4 +- .../test_boto3_sns.py | 6 +- .../test_botocore_dynamodb.py | 4 +- tests/external_botocore/test_botocore_ec2.py | 4 +- tests/external_botocore/test_botocore_s3.py | 4 +- tests/external_botocore/test_botocore_sqs.py | 6 +- tests/mlmodel_bedrock/conftest.py | 150 ------------------ tox.ini | 10 +- 16 files changed, 171 insertions(+), 230 deletions(-) delete mode 100644 tests/external_boto3/conftest.py rename tests/{mlmodel_bedrock => external_botocore}/_mock_external_bedrock_server.py (100%) rename tests/{mlmodel_bedrock/_test_chat_completion.py => external_botocore/_test_bedrock_chat_completion.py} (93%) rename tests/{mlmodel_bedrock/_test_embeddings.py => external_botocore/_test_bedrock_embeddings.py} (93%) rename tests/{mlmodel_bedrock/test_chat_completion.py => external_botocore/test_bedrock_chat_completion.py} (99%) rename tests/{mlmodel_bedrock/test_embeddings.py => external_botocore/test_bedrock_embeddings.py} (97%) rename tests/{external_boto3 => external_botocore}/test_boto3_iam.py (95%) rename tests/{external_boto3 => 
external_botocore}/test_boto3_s3.py (97%) rename tests/{external_boto3 => external_botocore}/test_boto3_sns.py (94%) delete mode 100644 tests/mlmodel_bedrock/conftest.py diff --git a/tests/external_boto3/conftest.py b/tests/external_boto3/conftest.py deleted file mode 100644 index 90d82f0072..0000000000 --- a/tests/external_boto3/conftest.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest - -from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture # noqa: F401; pylint: disable=W0611 - - -_default_settings = { - 'transaction_tracer.explain_threshold': 0.0, - 'transaction_tracer.transaction_threshold': 0.0, - 'transaction_tracer.stack_trace_threshold': 0.0, - 'debug.log_data_collector_payloads': True, - 'debug.record_transaction_failure': True, -} - -collector_agent_registration = collector_agent_registration_fixture( - app_name='Python Agent Test (external_boto3)', - default_settings=_default_settings) diff --git a/tests/mlmodel_bedrock/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py similarity index 100% rename from tests/mlmodel_bedrock/_mock_external_bedrock_server.py rename to tests/external_botocore/_mock_external_bedrock_server.py diff --git a/tests/mlmodel_bedrock/_test_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py similarity index 93% rename from 
tests/mlmodel_bedrock/_test_chat_completion.py rename to tests/external_botocore/_test_bedrock_chat_completion.py index 3f1f297d4a..fc69b1ff89 100644 --- a/tests/mlmodel_bedrock/_test_chat_completion.py +++ b/tests/external_botocore/_test_bedrock_chat_completion.py @@ -11,7 +11,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "transaction_id": None, "span_id": "span-id", @@ -36,7 +36,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", "span_id": "span-id", @@ -55,7 +55,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", "span_id": "span-id", @@ -76,7 +76,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "transaction_id": None, "span_id": "span-id", @@ -99,7 +99,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "1234-0", - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", "span_id": "span-id", @@ -118,7 +118,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "1234-1", - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", 
"request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", "span_id": "span-id", @@ -139,7 +139,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "transaction_id": None, "span_id": "span-id", @@ -161,7 +161,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", "span_id": "span-id", @@ -180,7 +180,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", "span_id": "span-id", @@ -201,7 +201,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "transaction_id": None, "span_id": "span-id", @@ -224,7 +224,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", "span_id": "span-id", @@ -243,7 +243,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", "span_id": "span-id", diff --git 
a/tests/mlmodel_bedrock/_test_embeddings.py b/tests/external_botocore/_test_bedrock_embeddings.py similarity index 93% rename from tests/mlmodel_bedrock/_test_embeddings.py rename to tests/external_botocore/_test_bedrock_embeddings.py index ab724d93c7..fe4b4b839a 100644 --- a/tests/mlmodel_bedrock/_test_embeddings.py +++ b/tests/external_botocore/_test_bedrock_embeddings.py @@ -9,7 +9,7 @@ {"type": "LlmEmbedding"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "transaction_id": None, "span_id": "span-id", "trace_id": "trace-id", @@ -31,7 +31,7 @@ {"type": "LlmEmbedding"}, { "id": None, # UUID that varies with each run - "appName": "Python Agent Test (mlmodel_bedrock)", + "appName": "Python Agent Test (external_botocore)", "transaction_id": None, "span_id": "span-id", "trace_id": "trace-id", diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index e5cf155336..67a2058239 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -12,19 +12,146 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest +import json +import os -from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture # noqa: F401; pylint: disable=W0611 +import pytest +from _mock_external_bedrock_server import ( + MockExternalBedrockServer, + extract_shortened_prompt, +) +from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 + collector_agent_registration_fixture, + collector_available_fixture, +) +from newrelic.api.time_trace import current_trace +from newrelic.api.transaction import current_transaction +from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.common.package_version_utils import get_package_version_tuple _default_settings = { - 'transaction_tracer.explain_threshold': 0.0, - 'transaction_tracer.transaction_threshold': 0.0, - 'transaction_tracer.stack_trace_threshold': 0.0, - 'debug.log_data_collector_payloads': True, - 'debug.record_transaction_failure': True, + "transaction_tracer.explain_threshold": 0.0, + "transaction_tracer.transaction_threshold": 0.0, + "transaction_tracer.stack_trace_threshold": 0.0, + "debug.log_data_collector_payloads": True, + "debug.record_transaction_failure": True, + "ml_insights_events.enabled": True, } - collector_agent_registration = collector_agent_registration_fixture( - app_name='Python Agent Test (external_botocore)', - default_settings=_default_settings) + app_name="Python Agent Test (external_botocore)", + default_settings=_default_settings, + linked_applications=["Python Agent Test (external_botocore)"], +) + + +# Bedrock Fixtures + +BEDROCK_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "bedrock_audit.log") +BEDROCK_AUDIT_LOG_CONTENTS = {} + + +@pytest.fixture(scope="session") +def bedrock_server(): + """ + This fixture will either create a mocked backend for testing purposes, or will + set up an audit log file to log responses of the real Bedrock backend to a file. 
+ The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 as + an environment variable to run using the real Bedrock backend. (Default: mocking) + """ + import boto3 + + from newrelic.core.config import _environ_as_bool + + if get_package_version_tuple("botocore") < (1, 31, 57): + pytest.skip(reason="Bedrock Runtime not available.") + + if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): + # Use mocked Bedrock backend and prerecorded responses + with MockExternalBedrockServer() as server: + client = boto3.client( # nosec + "bedrock-runtime", + "us-east-1", + endpoint_url="http://localhost:%d" % server.port, + aws_access_key_id="NOT-A-REAL-SECRET", + aws_secret_access_key="NOT-A-REAL-SECRET", + ) + + yield client + else: + # Use real Bedrock backend and record responses + assert ( + os.environ["AWS_ACCESS_KEY_ID"] and os.environ["AWS_SECRET_ACCESS_KEY"] + ), "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required." + + # Construct real client + client = boto3.client( + "bedrock-runtime", + "us-east-1", + ) + + # Apply function wrappers to record data + wrap_function_wrapper( + "botocore.client", "BaseClient._make_api_call", wrap_botocore_client_BaseClient__make_api_call + ) + yield client # Run tests + + # Write responses to audit log + with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: + json.dump(BEDROCK_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4) + + +# Intercept outgoing requests and log to file for mocking +RECORDED_HEADERS = set(["x-amzn-requestid", "content-type"]) + + +def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwargs): + from io import BytesIO + + from botocore.response import StreamingBody + + params = bind_make_api_call_params(*args, **kwargs) + if not params: + return wrapped(*args, **kwargs) + + body = json.loads(params["body"]) + model = params["modelId"] + prompt = extract_shortened_prompt(body, model) + + # Send request + result = wrapped(*args, 
**kwargs) + + # Intercept body data, and replace stream + streamed_body = result["body"].read() + result["body"] = StreamingBody(BytesIO(streamed_body), len(streamed_body)) + + # Clean up data + data = json.loads(streamed_body.decode("utf-8")) + headers = dict(result["ResponseMetadata"]["HTTPHeaders"].items()) + headers = dict( + filter( + lambda k: k[0] in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), + headers.items(), + ) + ) + + # Log response + BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log + return result + + +def bind_make_api_call_params(operation_name, api_params): + return api_params + + +@pytest.fixture(scope="session") +def set_trace_info(): + def _set_trace_info(): + txn = current_transaction() + if txn: + txn._trace_id = "trace-id" + trace = current_trace() + if trace: + trace.guid = "span-id" + + return _set_trace_info diff --git a/tests/mlmodel_bedrock/test_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py similarity index 99% rename from tests/mlmodel_bedrock/test_chat_completion.py rename to tests/external_botocore/test_bedrock_chat_completion.py index 4da60eaee4..995a931633 100644 --- a/tests/mlmodel_bedrock/test_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -17,7 +17,7 @@ from io import BytesIO import pytest -from _test_chat_completion import ( +from _test_bedrock_chat_completion import ( chat_completion_expected_events, chat_completion_payload_templates, ) diff --git a/tests/mlmodel_bedrock/test_embeddings.py b/tests/external_botocore/test_bedrock_embeddings.py similarity index 97% rename from tests/mlmodel_bedrock/test_embeddings.py rename to tests/external_botocore/test_bedrock_embeddings.py index a419953ab5..022eb07599 100644 --- a/tests/mlmodel_bedrock/test_embeddings.py +++ b/tests/external_botocore/test_bedrock_embeddings.py @@ -29,7 +29,7 @@ from newrelic.api.background_task import background_task -from _test_embeddings import 
embedding_expected_events, embedding_payload_templates +from _test_bedrock_embeddings import embedding_expected_events, embedding_payload_templates disabled_ml_insights_settings = {"ml_insights_events.enabled": False} diff --git a/tests/external_boto3/test_boto3_iam.py b/tests/external_botocore/test_boto3_iam.py similarity index 95% rename from tests/external_boto3/test_boto3_iam.py rename to tests/external_botocore/test_boto3_iam.py index a2237dc936..3d672f3751 100644 --- a/tests/external_boto3/test_boto3_iam.py +++ b/tests/external_botocore/test_boto3_iam.py @@ -17,7 +17,7 @@ import boto3 import moto -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -53,7 +53,7 @@ ] -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(exact_agents={"http.url": "https://iam.amazonaws.com/"}, count=3) @validate_span_events(expected_agents=("aws.requestId",), count=3) @validate_span_events(exact_agents={"aws.operation": "CreateUser"}, count=1) diff --git a/tests/external_boto3/test_boto3_s3.py b/tests/external_botocore/test_boto3_s3.py similarity index 97% rename from tests/external_boto3/test_boto3_s3.py rename to tests/external_botocore/test_boto3_s3.py index a7ecf034ab..b6299d9f6e 100644 --- a/tests/external_boto3/test_boto3_s3.py +++ b/tests/external_botocore/test_boto3_s3.py @@ -18,7 +18,7 @@ import boto3 import botocore import moto -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -73,7 +73,7 @@ ] 
-@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(exact_agents={"aws.operation": "CreateBucket"}, count=1) @validate_span_events(exact_agents={"aws.operation": "PutObject"}, count=1) @validate_span_events(exact_agents={"aws.operation": "ListObjects"}, count=1) diff --git a/tests/external_boto3/test_boto3_sns.py b/tests/external_botocore/test_boto3_sns.py similarity index 94% rename from tests/external_boto3/test_boto3_sns.py rename to tests/external_botocore/test_boto3_sns.py index bafe68611d..5e6c7c4b4e 100644 --- a/tests/external_boto3/test_boto3_sns.py +++ b/tests/external_botocore/test_boto3_sns.py @@ -17,7 +17,7 @@ import boto3 import moto import pytest -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -45,7 +45,7 @@ sns_metrics_phone = [("MessageBroker/SNS/Topic" "/Produce/Named/PhoneNumber", 1)] -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(expected_agents=("aws.requestId",), count=2) @validate_span_events(exact_agents={"aws.operation": "CreateTopic"}, count=1) @validate_span_events(exact_agents={"aws.operation": "Publish"}, count=1) @@ -74,7 +74,7 @@ def test_publish_to_sns_topic(topic_argument): assert "MessageId" in published_message -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(expected_agents=("aws.requestId",), count=3) @validate_span_events(exact_agents={"aws.operation": "CreateTopic"}, count=1) @validate_span_events(exact_agents={"aws.operation": "Subscribe"}, count=1) diff --git a/tests/external_botocore/test_botocore_dynamodb.py b/tests/external_botocore/test_botocore_dynamodb.py index 30114d53b1..6ce9f12c33 100644 --- 
a/tests/external_botocore/test_botocore_dynamodb.py +++ b/tests/external_botocore/test_botocore_dynamodb.py @@ -17,7 +17,7 @@ import botocore.session import moto -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -63,7 +63,7 @@ ] -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(expected_agents=("aws.requestId",), count=8) @validate_span_events(exact_agents={"aws.operation": "PutItem"}, count=1) @validate_span_events(exact_agents={"aws.operation": "GetItem"}, count=1) diff --git a/tests/external_botocore/test_botocore_ec2.py b/tests/external_botocore/test_botocore_ec2.py index 28a8ff63ae..3cb83e3185 100644 --- a/tests/external_botocore/test_botocore_ec2.py +++ b/tests/external_botocore/test_botocore_ec2.py @@ -17,7 +17,7 @@ import botocore.session import moto -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -55,7 +55,7 @@ ] -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(expected_agents=("aws.requestId",), count=3) @validate_span_events(exact_agents={"aws.operation": "RunInstances"}, count=1) @validate_span_events(exact_agents={"aws.operation": "DescribeInstances"}, count=1) diff --git a/tests/external_botocore/test_botocore_s3.py b/tests/external_botocore/test_botocore_s3.py index 1984d8103e..ea0c225390 100644 --- a/tests/external_botocore/test_botocore_s3.py +++ b/tests/external_botocore/test_botocore_s3.py @@ -18,7 +18,7 @@ import botocore import 
botocore.session import moto -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -67,7 +67,7 @@ ] -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(exact_agents={"aws.operation": "CreateBucket"}, count=1) @validate_span_events(exact_agents={"aws.operation": "PutObject"}, count=1) @validate_span_events(exact_agents={"aws.operation": "ListObjects"}, count=1) diff --git a/tests/external_botocore/test_botocore_sqs.py b/tests/external_botocore/test_botocore_sqs.py index 3f7d8c0220..63f15801b5 100644 --- a/tests/external_botocore/test_botocore_sqs.py +++ b/tests/external_botocore/test_botocore_sqs.py @@ -18,7 +18,7 @@ import botocore.session import moto import pytest -from testing_support.fixtures import override_application_settings +from testing_support.fixtures import dt_enabled from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, @@ -70,7 +70,7 @@ ] -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_span_events(exact_agents={"aws.operation": "CreateQueue"}, count=1) @validate_span_events(exact_agents={"aws.operation": "SendMessage"}, count=1) @validate_span_events(exact_agents={"aws.operation": "ReceiveMessage"}, count=1) @@ -124,7 +124,7 @@ def test_sqs(): assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 -@override_application_settings({"distributed_tracing.enabled": True}) +@dt_enabled @validate_transaction_metrics( "test_botocore_sqs:test_sqs_malformed", scoped_metrics=_sqs_scoped_metrics_malformed, diff --git a/tests/mlmodel_bedrock/conftest.py b/tests/mlmodel_bedrock/conftest.py deleted 
file mode 100644 index a313398ed4..0000000000 --- a/tests/mlmodel_bedrock/conftest.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os - -import pytest -from _mock_external_bedrock_server import ( - MockExternalBedrockServer, - extract_shortened_prompt, -) -from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 - collector_agent_registration_fixture, - collector_available_fixture, -) - -from newrelic.api.time_trace import current_trace -from newrelic.api.transaction import current_transaction -from newrelic.common.object_wrapper import wrap_function_wrapper - -_default_settings = { - "transaction_tracer.explain_threshold": 0.0, - "transaction_tracer.transaction_threshold": 0.0, - "transaction_tracer.stack_trace_threshold": 0.0, - "debug.log_data_collector_payloads": True, - "debug.record_transaction_failure": True, - "ml_insights_events.enabled": True, -} -collector_agent_registration = collector_agent_registration_fixture( - app_name="Python Agent Test (mlmodel_bedrock)", - default_settings=_default_settings, - linked_applications=["Python Agent Test (mlmodel_bedrock)"], -) - -BEDROCK_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "bedrock_audit.log") -BEDROCK_AUDIT_LOG_CONTENTS = {} - - -@pytest.fixture(autouse=True, scope="session") -def bedrock_server(): - """ - This fixture will either create a mocked backend for testing 
purposes, or will - set up an audit log file to log responses of the real Bedrock backend to a file. - The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 as - an environment variable to run using the real Bedrock backend. (Default: mocking) - """ - import boto3 - - from newrelic.core.config import _environ_as_bool - - if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): - # Use mocked Bedrock backend and prerecorded responses - with MockExternalBedrockServer() as server: - client = boto3.client( # nosec - "bedrock-runtime", - "us-east-1", - endpoint_url="http://localhost:%d" % server.port, - aws_access_key_id="NOT-A-REAL-SECRET", - aws_secret_access_key="NOT-A-REAL-SECRET", - ) - - yield client - else: - # Use real Bedrock backend and record responses - assert ( - os.environ["AWS_ACCESS_KEY_ID"] and os.environ["AWS_SECRET_ACCESS_KEY"] - ), "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required." - - # Construct real client - client = boto3.client( - "bedrock-runtime", - "us-east-1", - ) - - # Apply function wrappers to record data - wrap_function_wrapper( - "botocore.client", "BaseClient._make_api_call", wrap_botocore_client_BaseClient__make_api_call - ) - yield client # Run tests - - # Write responses to audit log - with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: - json.dump(BEDROCK_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4) - - -# Intercept outgoing requests and log to file for mocking -RECORDED_HEADERS = set(["x-amzn-requestid", "content-type"]) - - -def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwargs): - from io import BytesIO - - from botocore.response import StreamingBody - - params = bind_make_api_call_params(*args, **kwargs) - if not params: - return wrapped(*args, **kwargs) - - body = json.loads(params["body"]) - model = params["modelId"] - prompt = extract_shortened_prompt(body, model) - - # Send request - result = wrapped(*args, **kwargs) - - # 
Intercept body data, and replace stream - streamed_body = result["body"].read() - result["body"] = StreamingBody(BytesIO(streamed_body), len(streamed_body)) - - # Clean up data - data = json.loads(streamed_body.decode("utf-8")) - headers = dict(result["ResponseMetadata"]["HTTPHeaders"].items()) - headers = dict( - filter( - lambda k: k[0] in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), - headers.items(), - ) - ) - - # Log response - BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log - return result - - -def bind_make_api_call_params(operation_name, api_params): - return api_params - - -@pytest.fixture(scope="session") -def set_trace_info(): - def _set_trace_info(): - txn = current_transaction() - if txn: - txn._trace_id = "trace-id" - trace = current_trace() - if trace: - trace.guid = "span-id" - - return _set_trace_info diff --git a/tox.ini b/tox.ini index cc9488b30a..720c301dcd 100644 --- a/tox.ini +++ b/tox.ini @@ -97,7 +97,6 @@ envlist = redis-datastore_redis-{py37,py38,py39,py310,py311,pypy38}-redis{0400,latest}, rediscluster-datastore_rediscluster-{py37,py311,pypy38}-redis{latest}, python-datastore_sqlite-{py27,py37,py38,py39,py310,py311,pypy27,pypy38}, - python-external_boto3-{py27,py37,py38,py39,py310,py311}-boto01, python-external_botocore-{py37,py38,py39,py310,py311}-botocorelatest, python-external_botocore-{py311}-botocore128, python-external_botocore-py310-botocore0125, @@ -140,7 +139,6 @@ envlist = python-framework_starlette-{py37,py38}-starlette{002001}, python-framework_starlette-{py37,py38,py39,py310,py311,pypy38}-starlettelatest, python-framework_strawberry-{py37,py38,py39,py310,py311}-strawberrylatest, - python-mlmodel_bedrock-{py37,py38,py39,py310,py311,pypy38}, python-logger_logging-{py27,py37,py38,py39,py310,py311,pypy27,pypy38}, python-logger_loguru-{py37,py38,py39,py310,py311,pypy38}-logurulatest, python-logger_loguru-py39-loguru{06,05}, @@ -252,10 +250,9 @@ deps = datastore_redis-redislatest: redis 
datastore_rediscluster-redislatest: redis datastore_redis-redis0400: redis<4.1 - external_boto3-boto01: boto3<2.0 - external_boto3-boto01: moto<2.0 - external_boto3-py27: rsa<4.7.1 external_botocore-botocorelatest: botocore + external_botocore-botocorelatest: boto3 + external_botocore-botocorelatest: moto external_botocore-botocore128: botocore<1.29 external_botocore-botocore0125: botocore<1.26 external_botocore-{py37,py38,py39,py310,py311}: moto[awslambda,ec2,iam]<3.0 @@ -344,7 +341,6 @@ deps = framework_tornado: pycurl framework_tornado-tornadolatest: tornado framework_tornado-tornadomaster: https://github.com/tornadoweb/tornado/archive/master.zip - mlmodel_bedrock: boto3 logger_loguru-logurulatest: loguru logger_loguru-loguru06: loguru<0.7 logger_loguru-loguru05: loguru<0.6 @@ -439,7 +435,6 @@ changedir = datastore_redis: tests/datastore_redis datastore_rediscluster: tests/datastore_rediscluster datastore_sqlite: tests/datastore_sqlite - external_boto3: tests/external_boto3 external_botocore: tests/external_botocore external_feedparser: tests/external_feedparser external_http: tests/external_http @@ -464,7 +459,6 @@ changedir = framework_starlette: tests/framework_starlette framework_strawberry: tests/framework_strawberry framework_tornado: tests/framework_tornado - mlmodel_bedrock: tests/mlmodel_bedrock logger_logging: tests/logger_logging logger_loguru: tests/logger_loguru logger_structlog: tests/logger_structlog From cad06ffe51d758740b16d99ce951607dbce31cc0 Mon Sep 17 00:00:00 2001 From: Lalleh Rafeei Date: Fri, 3 Nov 2023 17:25:00 -0700 Subject: [PATCH 07/16] Initial bedrock error tracing commit --- newrelic/hooks/external_botocore.py | 43 ++++- .../test_bedrock_chat_completion_error.py | 147 ++++++++++++++++++ 2 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 tests/external_botocore/test_bedrock_chat_completion_error.py diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index c99293a63f..607c182b83 
100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -55,6 +55,38 @@ def extractor_string(*args, **kwargs): return extractor_list +def bedrock_error_attributes(exception, request_args): + + response_body = json.loads(request_args.get("body", "")) + # "api_key_last_four_digits": api_key_last_four_digits, + # "request.model": request_args.get("model") or request_args.get("engine") or "", + # "request.temperature": request_args.get("temperature", ""), + # "request.max_tokens": request_args.get("max_tokens", ""), + # "vendor": "openAI", + # "ingest_source": "Python", + # "response.organization": getattr(exception, "organization", ""), + # "response.number_of_messages": number_of_messages, + # "http.statusCode": getattr(exception, "http_status", ""), + # "error.message": getattr(exception, "_message", ""), + # "error.code": getattr(getattr(exception, "error", ""), "code", ""), + # "error.param": getattr(exception, "param", ""), + + breakpoint() + error_attributes = { + "request.id": exception.response.get("ResponseMetadata", "").get("RequestId", ""), + "api_key_last_four_digits": None, + "request.model": request_args.get("modelId", ""), + "request.temperature": response_body.get("textGenerationConfig", "").get("temperature", ""), + "request.max_tokens": response_body.get("textGenerationConfig", "").get("maxTokenCount", ""), + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": exception.response.get("ResponseMetadata", "").get("HTTPStatusCode", ""), + "error.message": exception.response.get("Error", "").get("Message", ""), + "error.code": exception.response.get("Error", "").get("Code", ""), + } + return error_attributes + + def create_chat_completion_message_event( transaction, app_name, @@ -217,7 +249,16 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): ft_name = callable_name(wrapped) with FunctionTrace(ft_name) as ft: - response = wrapped(*args, **kwargs) + try: + response = 
wrapped(*args, **kwargs) + except Exception as exc: + breakpoint() + error_attributes = bedrock_error_attributes(exc, kwargs) + # exc._nr_message = error_attributes.pop("error.message") + ft.notice_error( + attributes=error_attributes, + ) + raise if not response: return response diff --git a/tests/external_botocore/test_bedrock_chat_completion_error.py b/tests/external_botocore/test_bedrock_chat_completion_error.py new file mode 100644 index 0000000000..209b5ea95b --- /dev/null +++ b/tests/external_botocore/test_bedrock_chat_completion_error.py @@ -0,0 +1,147 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +from io import BytesIO + +import botocore +import pytest +from _test_bedrock_chat_completion import ( + chat_completion_expected_events, + chat_completion_payload_templates, +) +from test_bedrock_chat_completion import ( + exercise_model, + is_file_payload, + model_id, +) +from testing_support.fixtures import ( + dt_enabled, + override_application_settings, + reset_core_stats_engine, +) +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) +from testing_support.validators.validate_span_events import validate_span_events + +from newrelic.api.background_task import background_task +from newrelic.api.time_trace import current_trace +from newrelic.api.transaction import add_custom_attribute, current_transaction +from newrelic.common.object_names import callable_name + +_test_bedrock_chat_completion_prompt = "What is 212 degrees Fahrenheit converted to Celsius?" + +chat_completion_payload_templates_no_prompt = { + "amazon.titan-text-express-v1": '{ "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', + "ai21.j2-mid-v1": '{"temperature": %f, "maxTokens": %d}', + "cohere.command-text-v14": '{"temperature": %f, "max_tokens": %d}', +} + + +@pytest.fixture(scope="function") +def exercise_model_no_prompt(bedrock_server, model_id, is_file_payload): + payload_template = chat_completion_payload_templates_no_prompt[model_id] + + def _exercise_model(temperature=0.7, max_tokens=100): + breakpoint() + body = (payload_template % (temperature, max_tokens)).encode("utf-8") + if is_file_payload: + body = BytesIO(body) + + bedrock_server.invoke_model( + body=body, + modelId=model_id, + accept="application/json", + contentType="application/json", + ) + + return _exercise_model + + +# No prompt provided +@dt_enabled +@reset_core_stats_engine() +# @validate_error_trace_attributes( +# callable_name(botocore.InvalidRequestError), +# exact_attrs={ +# "agent": {}, +# "intrinsic": {}, +# "user": { +# # 
"api_key_last_four_digits": "sk-CRET", +# # "request.temperature": 0.7, +# # "request.max_tokens": 100, +# # "vendor": "openAI", +# # "ingest_source": "Python", +# # "response.number_of_messages": 2, +# # "error.param": "engine", +# }, +# }, +# ) +# @validate_span_events( +# exact_agents={ +# # "error.message": "Must provide an 'engine' or 'model' parameter to create a ", +# } +# ) +def test_bedrock_chat_completion_no_prompt(exercise_model_no_prompt): + @background_task() + def _test(): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + exercise_model_no_prompt(temperature=0.7, max_tokens=100) + + _test() + + +@dt_enabled +@reset_core_stats_engine() +@validate_error_trace_attributes( + callable_name(botocore.InvalidSignatureException), + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + # "request.id": "b61f5406-5955-4dc9-915c-9ae1bedda182", # This will change + # "api_key_last_four_digits": "sk-CRET", + # "request.model": None, # Grab from payload templates + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. 
Consult the service documentation for details.", + "error.code": "InvalidSignatureException", + }, + }, +) +def test_bedrock_chat_completion_incorrect_secret_access_key(exercise_model): + @background_task() + def _test(): + with pytest.raises(botocore.InvalidSignatureException): # not sure where this exception actually comes from + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + _test() + + +# @reset_core_stats_engine() +# def test_bedrock_chat_completion_in_txn(exercise_model, expected_events): +# @background_task() +# def _test(): +# set_trace_info() +# add_custom_attribute("conversation_id", "my-awesome-id") +# exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + +# _test() From 302972061c38f234d3d8858d4241b382a1d717d4 Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Tue, 7 Nov 2023 11:14:50 -0800 Subject: [PATCH 08/16] Add status code to mock bedrock server --- .../external_botocore/_mock_external_bedrock_server.py | 10 ++++++++-- tests/external_botocore/conftest.py | 3 ++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index c9149e3f1b..19ccf733cd 100644 --- a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -31,6 +31,7 @@ RESPONSES = { "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ {"content-type": "application/json", "x-amzn-requestid": "660d4de9-6804-460e-8556-4ab2a019d1e3"}, + 200, { "inputTextTokenCount": 12, "results": [ @@ -44,6 +45,7 @@ ], "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:": [ {"content-type": "application/json", "x-amzn-requestid": "f354b9a7-9eac-4f50-a8d7-7d5d23566176"}, + 200, { "completion": " Here are the step-by-step workings:\n1) 212 degrees Fahrenheit \n2) To convert to Celsius, use the formula: C = (F - 32) * 5/9\n3) Plug in the values: C = (212 - 32) * 5/9 = 100 * 5/9 = 100 degrees Celsius\n\nSo, 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", "stop_reason": "stop_sequence", @@ -52,6 +54,7 @@ ], "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ {"content-type": "application/json", "x-amzn-requestid": "c5188fb5-dc58-4cbe-948d-af173c69ce0d"}, + 200, { "generations": [ { @@ -66,6 +69,7 @@ ], "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ {"content-type": "application/json", "x-amzn-requestid": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e"}, + 200, { "id": 1234, "prompt": { @@ -240,6 +244,7 @@ ], "amazon.titan-embed-text-v1::This is an embedding test.": [ {"content-type": "application/json", "x-amzn-requestid": "75f1d3fe-6cde-4cf5-bdaf-7101f746ccfe"}, + 200, { "embedding": [ -0.14160156, @@ -1784,6 +1789,7 @@ ], "amazon.titan-embed-g1-text-02::This is an embedding test.": [ {"content-type": "application/json", "x-amzn-requestid": "f7e78265-6b7c-4b3a-b750-0c1d00347258"}, + 200, { "embedding": [ -0.14160156, @@ -3346,7 +3352,7 @@ def simple_get(self): headers, response = ({}, "") for k, v in RESPONSES.items(): if prompt.startswith(k): - headers, response = v + headers, status_code, response = v break else: # If no matches found self.send_response(500) @@ -3355,7 +3361,7 @@ def simple_get(self): return # Send response code - self.send_response(200) + self.send_response(status_code) # Send headers for k, v in headers.items(): diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index 67a2058239..82f415f10b 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -134,9 +134,10 
@@ def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwar headers.items(), ) ) + status_code = result["ResponseMetadata"]["HTTPStatusCode"] # Log response - BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, data # Append response data to audit log + BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, status_code, data # Append response data to audit log return result From 79b46261a96149d4bb5d12a0c9de7245a78f6586 Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Tue, 7 Nov 2023 14:21:51 -0800 Subject: [PATCH 09/16] Updating error response recording logic --- .../_mock_external_bedrock_server.py | 127 ++++++++++-------- tests/external_botocore/conftest.py | 38 +++--- 2 files changed, 93 insertions(+), 72 deletions(-) diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index 19ccf733cd..378a3756bd 100644 --- a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -29,46 +29,13 @@ # 3) This app runs on a separate thread meaning it won't block the test app. RESPONSES = { - "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ - {"content-type": "application/json", "x-amzn-requestid": "660d4de9-6804-460e-8556-4ab2a019d1e3"}, - 200, - { - "inputTextTokenCount": 12, - "results": [ - { - "tokenCount": 55, - "outputText": "\nUse the formula,\n\u00b0C = (\u00b0F - 32) x 5/9\n= 212 x 5/9\n= 100 degrees Celsius\n212 degrees Fahrenheit is 100 degrees Celsius.", - "completionReason": "FINISH", - } - ], - }, - ], - "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:": [ - {"content-type": "application/json", "x-amzn-requestid": "f354b9a7-9eac-4f50-a8d7-7d5d23566176"}, - 200, - { - "completion": " Here are the step-by-step workings:\n1) 212 degrees Fahrenheit \n2) To convert to Celsius, use the formula: C = (F - 32) * 5/9\n3) Plug in the values: C = (212 - 32) * 5/9 = 100 * 5/9 = 100 degrees Celsius\n\nSo, 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", - "stop_reason": "stop_sequence", - "stop": "\n\nHuman:", - }, - ], - "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ - {"content-type": "application/json", "x-amzn-requestid": "c5188fb5-dc58-4cbe-948d-af173c69ce0d"}, - 200, - { - "generations": [ - { - "finish_reason": "MAX_TOKENS", - "id": "0730f5c0-9a49-4f35-af94-cf8f77327740", - "text": " To convert 212 degrees Fahrenheit to Celsius, we can use the conversion factor that Celsius is equal to (Fahrenheit - 32) x 5/9. \\n\\nApplying this formula, we have:\\n212°F = (212°F - 32) x 5/9\\n= (180) x 5/9\\n= 100°C.\\n\\nTherefore, 212 degrees F", - } - ], - "id": "a9cc8ce6-50b6-40b6-bf77-cf24561d8de7", - "prompt": "What is 212 degrees Fahrenheit converted to Celsius?", - }, + "ai21.j2-mid-v1::Invalid Token": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "b393ba08-c838-4503-a489-a43bf9bbaccd"}, + 403, + {"message": "The security token included in the request is invalid."}, ], "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ - {"content-type": "application/json", "x-amzn-requestid": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e"}, + {"Content-Type": "application/json", "x-amzn-RequestId": "c863d9fc-888b-421c-a175-ac5256baec62"}, 200, { "id": 1234, @@ -77,7 +44,7 @@ "tokens": [ { "generatedToken": { - "token": "\u2581What\u2581is", + "token": "▁What▁is", "logprob": -7.446773529052734, "raw_logprob": -7.446773529052734, }, @@ -86,7 +53,7 @@ }, { "generatedToken": { - "token": "\u2581", + "token": "▁", "logprob": -3.8046724796295166, 
"raw_logprob": -3.8046724796295166, }, @@ -104,7 +71,7 @@ }, { "generatedToken": { - "token": "\u2581degrees\u2581Fahrenheit", + "token": "▁degrees▁Fahrenheit", "logprob": -7.953181743621826, "raw_logprob": -7.953181743621826, }, @@ -113,7 +80,7 @@ }, { "generatedToken": { - "token": "\u2581converted\u2581to", + "token": "▁converted▁to", "logprob": -6.168096542358398, "raw_logprob": -6.168096542358398, }, @@ -122,7 +89,7 @@ }, { "generatedToken": { - "token": "\u2581Celsius", + "token": "▁Celsius", "logprob": -0.09790332615375519, "raw_logprob": -0.09790332615375519, }, @@ -156,7 +123,7 @@ }, { "generatedToken": { - "token": "\u2581", + "token": "▁", "logprob": -0.03473362699151039, "raw_logprob": -0.11261807382106781, }, @@ -174,7 +141,7 @@ }, { "generatedToken": { - "token": "\u2581degrees\u2581Fahrenheit", + "token": "▁degrees▁Fahrenheit", "logprob": -0.003579758107662201, "raw_logprob": -0.03144374489784241, }, @@ -183,7 +150,7 @@ }, { "generatedToken": { - "token": "\u2581is\u2581equal\u2581to", + "token": "▁is▁equal▁to", "logprob": -0.0027733694296330214, "raw_logprob": -0.027207009494304657, }, @@ -192,7 +159,7 @@ }, { "generatedToken": { - "token": "\u2581", + "token": "▁", "logprob": -0.0003392120997887105, "raw_logprob": -0.005458095110952854, }, @@ -210,7 +177,7 @@ }, { "generatedToken": { - "token": "\u2581degrees\u2581Celsius", + "token": "▁degrees▁Celsius", "logprob": -0.31207239627838135, "raw_logprob": -0.402545303106308, }, @@ -242,8 +209,8 @@ ], }, ], - "amazon.titan-embed-text-v1::This is an embedding test.": [ - {"content-type": "application/json", "x-amzn-requestid": "75f1d3fe-6cde-4cf5-bdaf-7101f746ccfe"}, + "amazon.titan-embed-g1-text-02::This is an embedding test.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "b10ac895-eae3-4f07-b926-10b2866c55ed"}, 200, { "embedding": [ @@ -1787,8 +1754,8 @@ "inputTextTokenCount": 6, }, ], - "amazon.titan-embed-g1-text-02::This is an embedding test.": [ - {"content-type": 
"application/json", "x-amzn-requestid": "f7e78265-6b7c-4b3a-b750-0c1d00347258"}, + "amazon.titan-embed-text-v1::This is an embedding test.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "11233989-07e8-4ecb-9ba6-79601ba6d8cc"}, 200, { "embedding": [ @@ -3332,8 +3299,62 @@ "inputTextTokenCount": 6, }, ], + "amazon.titan-text-express-v1::Invalid Token": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "041a580c-c3a4-4d99-aafc-00dc0698da5a"}, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "03524118-8d77-430f-9e08-63b5c03a40cf"}, + 200, + { + "inputTextTokenCount": 12, + "results": [ + { + "tokenCount": 75, + "outputText": "\nUse the formula,\n°C = °F - 32) x (5/9)\n= 212 - 32 x (5/9)\n= 212 - 16.11\n= 195.89\n\nThe answer is 195.89 degrees Celsius.", + "completionReason": "FINISH", + } + ], + }, + ], + "anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "cdc4ea3e-8724-45e3-97a3-9c2dec3376ca"}, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18"}, + 200, + { + "completion": " Okay, here are the conversion steps:\n212 degrees Fahrenheit\n- Subtract 32 from 212 to get 180 (to convert from Fahrenheit to Celsius scale)\n- Multiply by 5/9 (because the formula is °C = (°F - 32) × 5/9)\n- 180 × 5/9 = 100\n\nSo 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", + "stop_reason": "stop_sequence", + "stop": "\n\nHuman:", + }, + ], + "cohere.command-text-v14::Invalid Token": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "cc797330-6fc2-4570-a3c2-f60ef63d37b0"}, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "e77422c8-fbbf-4e17-afeb-c758425c9f97"}, + 200, + { + "generations": [ + { + "finish_reason": "MAX_TOKENS", + "id": "d20c06b0-aafe-4230-b2c7-200f4069355e", + "text": " 212°F is equivalent to 100°C. \n\nFahrenheit and Celsius are two temperature scales commonly used in everyday life. The Fahrenheit scale is based on 32°F for the freezing point of water and 212°F for the boiling point of water. On the other hand, the Celsius scale uses 0°C and 100°C as the freezing and boiling points of water, respectively. 
\n\nTo convert from Fahrenheit to Celsius, we subtract 32 from the Fahrenheit temperature and multiply the result", + } + ], + "id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", + "prompt": "What is 212 degrees Fahrenheit converted to Celsius?", + }, + ], } + MODEL_PATH_RE = re.compile(r"/model/([^/]+)/invoke") @@ -3374,7 +3395,7 @@ def simple_get(self): def extract_shortened_prompt(content, model): - prompt = content.get("inputText", None) or content.get("prompt", None) + prompt = content.get("inputText", "") or content.get("prompt", "") prompt = "::".join((model, prompt)) # Prepend model name to prompt key to keep separate copies return prompt.lstrip().split("\n")[0] diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index 82f415f10b..b39a13c53e 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -14,6 +14,7 @@ import json import os +import re import pytest from _mock_external_bedrock_server import ( @@ -92,11 +93,12 @@ def bedrock_server(): # Apply function wrappers to record data wrap_function_wrapper( - "botocore.client", "BaseClient._make_api_call", wrap_botocore_client_BaseClient__make_api_call + "botocore.endpoint", "Endpoint._do_get_response", wrap_botocore_endpoint_Endpoint__do_get_response ) yield client # Run tests # Write responses to audit log + BEDROCK_AUDIT_LOG_CONTENTS = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: i[0])) with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: json.dump(BEDROCK_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4) @@ -105,44 +107,42 @@ def bedrock_server(): RECORDED_HEADERS = set(["x-amzn-requestid", "content-type"]) -def wrap_botocore_client_BaseClient__make_api_call(wrapped, instance, args, kwargs): - from io import BytesIO - - from botocore.response import StreamingBody - - params = bind_make_api_call_params(*args, **kwargs) - if not params: +def wrap_botocore_endpoint_Endpoint__do_get_response(wrapped, instance, args, kwargs): + 
request = bind__do_get_response(*args, **kwargs) + if not request: return wrapped(*args, **kwargs) - body = json.loads(params["body"]) - model = params["modelId"] + body = json.loads(request.body) + + match = re.search(r"/model/([0-9a-zA-Z.-]+)/", request.url) + model = match.group(1) prompt = extract_shortened_prompt(body, model) # Send request result = wrapped(*args, **kwargs) - # Intercept body data, and replace stream - streamed_body = result["body"].read() - result["body"] = StreamingBody(BytesIO(streamed_body), len(streamed_body)) + # Unpack response + success, exception = result + response = (success or exception)[0] # Clean up data - data = json.loads(streamed_body.decode("utf-8")) - headers = dict(result["ResponseMetadata"]["HTTPHeaders"].items()) + data = json.loads(response.content.decode("utf-8")) + headers = dict(response.headers.items()) headers = dict( filter( - lambda k: k[0] in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), + lambda k: k[0].lower() in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), headers.items(), ) ) - status_code = result["ResponseMetadata"]["HTTPStatusCode"] + status_code = response.status_code # Log response BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, status_code, data # Append response data to audit log return result -def bind_make_api_call_params(operation_name, api_params): - return api_params +def bind__do_get_response(request, operation_model, context): + return request @pytest.fixture(scope="session") From 8ebd9c58043018799adc4c925453aeb4ab72e2df Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Wed, 8 Nov 2023 09:22:45 -0800 Subject: [PATCH 10/16] Work on bedrock errror tracing --- newrelic/hooks/external_botocore.py | 45 +++++------ .../_test_bedrock_chat_completion.py | 29 +++++++ .../test_bedrock_chat_completion.py | 76 +++++++++++++++++++ 3 files changed, 122 insertions(+), 28 deletions(-) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 607c182b83..0d8a327443 
100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -55,34 +55,23 @@ def extractor_string(*args, **kwargs): return extractor_list -def bedrock_error_attributes(exception, request_args): - +def bedrock_error_attributes(exception, request_args, client): + response = getattr(exception, "response", None) + if not response: + return {} + response_body = json.loads(request_args.get("body", "")) - # "api_key_last_four_digits": api_key_last_four_digits, - # "request.model": request_args.get("model") or request_args.get("engine") or "", - # "request.temperature": request_args.get("temperature", ""), - # "request.max_tokens": request_args.get("max_tokens", ""), - # "vendor": "openAI", - # "ingest_source": "Python", - # "response.organization": getattr(exception, "organization", ""), - # "response.number_of_messages": number_of_messages, - # "http.statusCode": getattr(exception, "http_status", ""), - # "error.message": getattr(exception, "_message", ""), - # "error.code": getattr(getattr(exception, "error", ""), "code", ""), - # "error.param": getattr(exception, "param", ""), - - breakpoint() error_attributes = { - "request.id": exception.response.get("ResponseMetadata", "").get("RequestId", ""), - "api_key_last_four_digits": None, + "request.id": response.get("ResponseMetadata", "").get("RequestId", ""), + "api_key_last_four_digits": client._request_signer._credentials.access_key[-4:], "request.model": request_args.get("modelId", ""), "request.temperature": response_body.get("textGenerationConfig", "").get("temperature", ""), "request.max_tokens": response_body.get("textGenerationConfig", "").get("maxTokenCount", ""), "vendor": "Bedrock", "ingest_source": "Python", - "http.statusCode": exception.response.get("ResponseMetadata", "").get("HTTPStatusCode", ""), - "error.message": exception.response.get("Error", "").get("Message", ""), - "error.code": exception.response.get("Error", "").get("Code", ""), + "http.statusCode": 
response.get("ResponseMetadata", "").get("HTTPStatusCode", ""), + "error.message": response.get("Error", "").get("Message", ""), + "error.code": response.get("Error", "").get("Code", ""), } return error_attributes @@ -252,13 +241,13 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): try: response = wrapped(*args, **kwargs) except Exception as exc: - breakpoint() - error_attributes = bedrock_error_attributes(exc, kwargs) - # exc._nr_message = error_attributes.pop("error.message") - ft.notice_error( - attributes=error_attributes, - ) - raise + try: + error_attributes = bedrock_error_attributes(exc, kwargs, instance) + ft.notice_error( + attributes=error_attributes, + ) + finally: + raise if not response: return response diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py index fc69b1ff89..3987919bd0 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion.py +++ b/tests/external_botocore/_test_bedrock_chat_completion.py @@ -260,3 +260,32 @@ ), ], } + +chat_completion_expected_client_errors = { + "amazon.titan-text-express-v1": { + "conversation_id": "my-awesome-id", + "request.id": "041a580c-c3a4-4d99-aafc-00dc0698da5a", + "api_key_last_four_digits": "-KEY", + "request.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "403", + }, + "ai21.j2-mid-v1": { + "conversation_id": "my-awesome-id", + "request.id": "041a580c-c3a4-4d99-aafc-00dc0698da5a", + "api_key_last_four_digits": "-KEY", + "request.model": "amazon.titan-text-express-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + 
"error.code": "403", + } +} diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 995a931633..9a99f7a6a0 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import botocore.exceptions + import copy import json from io import BytesIO @@ -20,18 +22,25 @@ from _test_bedrock_chat_completion import ( chat_completion_expected_events, chat_completion_payload_templates, + chat_completion_expected_client_errors, ) from testing_support.fixtures import ( + dt_enabled, override_application_settings, reset_core_stats_engine, ) from testing_support.validators.validate_ml_event_count import validate_ml_event_count from testing_support.validators.validate_ml_events import validate_ml_events +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) from newrelic.api.background_task import background_task from newrelic.api.time_trace import current_trace from newrelic.api.transaction import add_custom_attribute, current_transaction +from newrelic.common.object_names import callable_name @pytest.fixture(scope="session", params=[False, True], ids=["Bytes", "Stream"]) def is_file_payload(request): @@ -85,6 +94,11 @@ def expected_events_no_convo_id(model_id): return events +@pytest.fixture(scope="module") +def expected_client_error(model_id): + return chat_completion_expected_client_errors[model_id] + + _test_bedrock_chat_completion_prompt = "What is 212 degrees Fahrenheit converted to Celsius?" 
@@ -154,3 +168,65 @@ def test_bedrock_chat_completion_outside_txn(set_trace_info, exercise_model): def test_bedrock_chat_completion_disabled_settings(set_trace_info, exercise_model): set_trace_info() exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) + + + +_client_error = botocore.exceptions.ClientError +_client_error_name = callable_name(_client_error) + + +# No prompt provided +@dt_enabled +@reset_core_stats_engine() +# @validate_error_trace_attributes( +# callable_name(botocore.InvalidRequestError), +# exact_attrs={ +# "agent": {}, +# "intrinsic": {}, +# "user": { +# # "api_key_last_four_digits": "sk-CRET", +# # "request.temperature": 0.7, +# # "request.max_tokens": 100, +# # "vendor": "openAI", +# # "ingest_source": "Python", +# # "response.number_of_messages": 2, +# # "error.param": "engine", +# }, +# }, +# ) +# @validate_span_events( +# exact_agents={ +# # "error.message": "Must provide an 'engine' or 'model' parameter to create a ", +# } +# ) +def test_bedrock_chat_completion_error_no_prompt(exercise_model_no_prompt, set_trace_info): + @background_task() + def _test(): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + exercise_model_no_prompt(temperature=0.7, max_tokens=100) + + _test() + + +@dt_enabled +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_incorrect_access_key(monkeypatch, bedrock_server, exercise_model, set_trace_info, expected_client_error): + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": expected_client_error, + }, + ) + @background_task() + def _test(): + monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): # not sure where this exception actually comes from + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + exercise_model(prompt="Invalid Token", temperature=0.7, 
max_tokens=100) + + _test() From 58694dc41b6e6abd033152049e68703fc6997b62 Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Wed, 8 Nov 2023 10:42:16 -0800 Subject: [PATCH 11/16] Chat completion error tracing --- newrelic/hooks/external_botocore.py | 177 +++++++++++------- .../_mock_external_bedrock_server.py | 59 ++++-- .../_test_bedrock_chat_completion.py | 70 ++++--- .../_test_bedrock_embeddings.py | 4 +- tests/external_botocore/conftest.py | 6 +- .../test_bedrock_chat_completion.py | 59 +++--- .../test_bedrock_chat_completion_error.py | 147 --------------- 7 files changed, 231 insertions(+), 291 deletions(-) delete mode 100644 tests/external_botocore/test_bedrock_chat_completion_error.py diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 0d8a327443..a28082e4a8 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -55,24 +55,24 @@ def extractor_string(*args, **kwargs): return extractor_list -def bedrock_error_attributes(exception, request_args, client): +def bedrock_error_attributes(exception, request_args, client, extractor): response = getattr(exception, "response", None) if not response: return {} - response_body = json.loads(request_args.get("body", "")) - error_attributes = { + request_body = request_args.get("body", "") + error_attributes = extractor(request_body)[1] + + error_attributes.update({ "request.id": response.get("ResponseMetadata", "").get("RequestId", ""), "api_key_last_four_digits": client._request_signer._credentials.access_key[-4:], "request.model": request_args.get("modelId", ""), - "request.temperature": response_body.get("textGenerationConfig", "").get("temperature", ""), - "request.max_tokens": response_body.get("textGenerationConfig", "").get("maxTokenCount", ""), "vendor": "Bedrock", "ingest_source": "Python", "http.statusCode": response.get("ResponseMetadata", "").get("HTTPStatusCode", ""), "error.message": response.get("Error", "").get("Message", ""), 
"error.code": response.get("Error", "").get("Code", ""), - } + }) return error_attributes @@ -116,37 +116,47 @@ def create_chat_completion_message_event( transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict) -def extract_bedrock_titan_text_model(request_body, response_body): - response_body = json.loads(response_body) +def extract_bedrock_titan_text_model(request_body, response_body=None): request_body = json.loads(request_body) - - input_tokens = response_body["inputTextTokenCount"] - completion_tokens = sum(result["tokenCount"] for result in response_body.get("results", [])) - total_tokens = input_tokens + completion_tokens + if response_body: + response_body = json.loads(response_body) request_config = request_body.get("textGenerationConfig", {}) - message_list = [{"role": "user", "content": request_body.get("inputText", "")}] - message_list.extend( - {"role": "assistant", "content": result["outputText"]} for result in response_body.get("results", []) - ) chat_completion_summary_dict = { "request.max_tokens": request_config.get("maxTokenCount", ""), "request.temperature": request_config.get("temperature", ""), - "response.choices.finish_reason": response_body["results"][0]["completionReason"], - "response.usage.completion_tokens": completion_tokens, - "response.usage.prompt_tokens": input_tokens, - "response.usage.total_tokens": total_tokens, - "response.number_of_messages": len(message_list), } + + if response_body: + input_tokens = response_body["inputTextTokenCount"] + completion_tokens = sum(result["tokenCount"] for result in response_body.get("results", [])) + total_tokens = input_tokens + completion_tokens + + message_list = [{"role": "user", "content": request_body.get("inputText", "")}] + message_list.extend( + {"role": "assistant", "content": result["outputText"]} for result in response_body.get("results", []) + ) + + chat_completion_summary_dict.update({ + "response.choices.finish_reason": 
response_body["results"][0]["completionReason"], + "response.usage.completion_tokens": completion_tokens, + "response.usage.prompt_tokens": input_tokens, + "response.usage.total_tokens": total_tokens, + "response.number_of_messages": len(message_list), + }) + else: + message_list = [] + return message_list, chat_completion_summary_dict -def extract_bedrock_titan_embedding_model(request_body, response_body): - response_body = json.loads(response_body) +def extract_bedrock_titan_embedding_model(request_body, response_body=None): request_body = json.loads(request_body) + if response_body: + response_body = json.loads(response_body) - input_tokens = response_body["inputTextTokenCount"] + input_tokens = response_body.get("inputTextTokenCount", None) embedding_dict = { "input": request_body.get("inputText", ""), @@ -156,59 +166,85 @@ def extract_bedrock_titan_embedding_model(request_body, response_body): return embedding_dict -def extract_bedrock_ai21_j2_model(request_body, response_body): - response_body = json.loads(response_body) +def extract_bedrock_ai21_j2_model(request_body, response_body=None): request_body = json.loads(request_body) - - message_list = [{"role": "user", "content": request_body.get("prompt", "")}] - message_list.extend( - {"role": "assistant", "content": result["data"]["text"]} for result in response_body.get("completions", []) - ) + if response_body: + response_body = json.loads(response_body) chat_completion_summary_dict = { "request.max_tokens": request_body.get("maxTokens", ""), "request.temperature": request_body.get("temperature", ""), - "response.choices.finish_reason": response_body["completions"][0]["finishReason"]["reason"], - "response.number_of_messages": len(message_list), - "response_id": str(response_body.get("id", "")), } + + if response_body: + message_list = [{"role": "user", "content": request_body.get("prompt", "")}] + message_list.extend( + {"role": "assistant", "content": result["data"]["text"]} for result in 
response_body.get("completions", []) + ) + + chat_completion_summary_dict.update({ + "response.choices.finish_reason": response_body["completions"][0]["finishReason"]["reason"], + "response.number_of_messages": len(message_list), + "response_id": str(response_body.get("id", "")), + }) + else: + message_list = [] + return message_list, chat_completion_summary_dict -def extract_bedrock_claude_model(request_body, response_body): - response_body = json.loads(response_body) +def extract_bedrock_claude_model(request_body, response_body=None): request_body = json.loads(request_body) - - message_list = [ - {"role": "user", "content": request_body.get("prompt", "")}, - {"role": "assistant", "content": response_body.get("completion", "")}, - ] + if response_body: + response_body = json.loads(response_body) chat_completion_summary_dict = { "request.max_tokens": request_body.get("max_tokens_to_sample", ""), "request.temperature": request_body.get("temperature", ""), - "response.choices.finish_reason": response_body.get("stop_reason", ""), - "response.number_of_messages": len(message_list), } + + if response_body: + message_list = [ + {"role": "user", "content": request_body.get("prompt", "")}, + {"role": "assistant", "content": response_body.get("completion", "")}, + ] + + chat_completion_summary_dict.update({ + "response.choices.finish_reason": response_body.get("stop_reason", ""), + "response.number_of_messages": len(message_list), + }) + else: + message_list = [] + return message_list, chat_completion_summary_dict -def extract_bedrock_cohere_model(request_body, response_body): - response_body = json.loads(response_body) +def extract_bedrock_cohere_model(request_body, response_body=None): request_body = json.loads(request_body) - - message_list = [{"role": "user", "content": request_body.get("prompt", "")}] - message_list.extend( - {"role": "assistant", "content": result["text"]} for result in response_body.get("generations", []) - ) + if response_body: + response_body = 
json.loads(response_body) chat_completion_summary_dict = { "request.max_tokens": request_body.get("max_tokens", ""), "request.temperature": request_body.get("temperature", ""), - "response.choices.finish_reason": response_body["generations"][0]["finish_reason"], - "response.number_of_messages": len(message_list), - "response_id": str(response_body.get("id", "")), } + + if response_body: + message_list = [{"role": "user", "content": request_body.get("prompt", "")}] + message_list.extend( + {"role": "assistant", "content": result["text"]} for result in response_body.get("generations", []) + ) + + chat_completion_summary_dict.update({ + "request.max_tokens": request_body.get("max_tokens", ""), + "request.temperature": request_body.get("temperature", ""), + "response.choices.finish_reason": response_body["generations"][0]["finish_reason"], + "response.number_of_messages": len(message_list), + "response_id": str(response_body.get("id", "")), + }) + else: + message_list = [] + return message_list, chat_completion_summary_dict @@ -236,26 +272,10 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): request_body = request_body.read() kwargs["body"] = request_body - ft_name = callable_name(wrapped) - with FunctionTrace(ft_name) as ft: - try: - response = wrapped(*args, **kwargs) - except Exception as exc: - try: - error_attributes = bedrock_error_attributes(exc, kwargs, instance) - ft.notice_error( - attributes=error_attributes, - ) - finally: - raise - - if not response: - return response - # Determine model to be used with extractor model = kwargs.get("modelId") if not model: - return response + return wrapped(*args, **kwargs) # Determine extractor by model type for extractor_name, extractor in MODEL_EXTRACTORS: @@ -271,7 +291,24 @@ def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): model, ) UNSUPPORTED_MODEL_WARNING_SENT = True + + extractor = lambda *args: ([], {}) # Empty extractor that returns nothing + ft_name = 
callable_name(wrapped) + with FunctionTrace(ft_name) as ft: + try: + response = wrapped(*args, **kwargs) + except Exception as exc: + try: + error_attributes = extractor(request_body) + error_attributes = bedrock_error_attributes(exc, kwargs, instance, extractor) + ft.notice_error( + attributes=error_attributes, + ) + finally: + raise + + if not response: return response # Read and replace response streaming bodies diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index 378a3756bd..42e430a124 100644 --- a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -30,10 +30,50 @@ RESPONSES = { "ai21.j2-mid-v1::Invalid Token": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "b393ba08-c838-4503-a489-a43bf9bbaccd"}, + { + "Content-Type": "application/json", + "x-amzn-RequestId": "9021791d-3797-493d-9277-e33aa6f6d544", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "amazon.titan-text-express-v1::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "15b39c8b-8e85-42c9-9623-06720301bda3", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "37396f55-b721-4bae-9461-4c369f5a080d", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, 403, {"message": "The security token included in the request is invalid."}, ], + "cohere.command-text-v14::Invalid Token": [ + { + "Content-Type": "application/json", + 
"x-amzn-RequestId": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "does-not-exist::": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "f4908827-3db9-4742-9103-2bbc34578b03", + "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", + }, + 400, + {"message": "The provided model identifier is invalid."}, + ], "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ {"Content-Type": "application/json", "x-amzn-RequestId": "c863d9fc-888b-421c-a175-ac5256baec62"}, 200, @@ -3299,11 +3339,6 @@ "inputTextTokenCount": 6, }, ], - "amazon.titan-text-express-v1::Invalid Token": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "041a580c-c3a4-4d99-aafc-00dc0698da5a"}, - 403, - {"message": "The security token included in the request is invalid."}, - ], "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ {"Content-Type": "application/json", "x-amzn-RequestId": "03524118-8d77-430f-9e08-63b5c03a40cf"}, 200, @@ -3312,17 +3347,12 @@ "results": [ { "tokenCount": 75, - "outputText": "\nUse the formula,\n°C = °F - 32) x (5/9)\n= 212 - 32 x (5/9)\n= 212 - 16.11\n= 195.89\n\nThe answer is 195.89 degrees Celsius.", + "outputText": "\nUse the formula,\n°C = (°F - 32) x 5/9\n= 212 x 5/9\n= 100 degrees Celsius\n212 degrees Fahrenheit is 100 degrees Celsius.", "completionReason": "FINISH", } ], }, ], - "anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "cdc4ea3e-8724-45e3-97a3-9c2dec3376ca"}, - 403, - {"message": "The security token included in the request is invalid."}, - ], "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? 
Assistant:": [ {"Content-Type": "application/json", "x-amzn-RequestId": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18"}, 200, @@ -3332,11 +3362,6 @@ "stop": "\n\nHuman:", }, ], - "cohere.command-text-v14::Invalid Token": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "cc797330-6fc2-4570-a3c2-f60ef63d37b0"}, - 403, - {"message": "The security token included in the request is invalid."}, - ], "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ {"Content-Type": "application/json", "x-amzn-RequestId": "e77422c8-fbbf-4e17-afeb-c758425c9f97"}, 200, diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py index 3987919bd0..bb4e479bdd 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion.py +++ b/tests/external_botocore/_test_bedrock_chat_completion.py @@ -16,13 +16,13 @@ "transaction_id": None, "span_id": "span-id", "trace_id": "trace-id", - "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", + "request_id": "03524118-8d77-430f-9e08-63b5c03a40cf", "api_key_last_four_digits": "CRET", "duration": None, # Response time varies each test run "request.model": "amazon.titan-text-express-v1", "response.model": "amazon.titan-text-express-v1", - "response.usage.completion_tokens": 55, - "response.usage.total_tokens": 67, + "response.usage.completion_tokens": 75, + "response.usage.total_tokens": 87, "response.usage.prompt_tokens": 12, "request.temperature": 0.7, "request.max_tokens": 100, @@ -38,7 +38,7 @@ "id": None, # UUID that varies with each run "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", + "request_id": "03524118-8d77-430f-9e08-63b5c03a40cf", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, @@ -57,7 +57,7 @@ "id": None, # UUID that varies with each run "appName": "Python Agent Test (external_botocore)", "conversation_id": 
"my-awesome-id", - "request_id": "660d4de9-6804-460e-8556-4ab2a019d1e3", + "request_id": "03524118-8d77-430f-9e08-63b5c03a40cf", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, @@ -81,7 +81,7 @@ "transaction_id": None, "span_id": "span-id", "trace_id": "trace-id", - "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", + "request_id": "c863d9fc-888b-421c-a175-ac5256baec62", "response_id": "1234", "api_key_last_four_digits": "CRET", "duration": None, # Response time varies each test run @@ -101,7 +101,7 @@ "id": "1234-0", "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", + "request_id": "c863d9fc-888b-421c-a175-ac5256baec62", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, @@ -120,7 +120,7 @@ "id": "1234-1", "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": "3bf1bb6b-b6f0-4901-85a1-2fa0e814440e", + "request_id": "c863d9fc-888b-421c-a175-ac5256baec62", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, @@ -144,7 +144,7 @@ "transaction_id": None, "span_id": "span-id", "trace_id": "trace-id", - "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", + "request_id": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18", "api_key_last_four_digits": "CRET", "duration": None, # Response time varies each test run "request.model": "anthropic.claude-instant-v1", @@ -163,7 +163,7 @@ "id": None, # UUID that varies with each run "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": "f354b9a7-9eac-4f50-a8d7-7d5d23566176", + "request_id": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, @@ -182,11 +182,11 @@ "id": None, # UUID that varies with each run "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": 
"f354b9a7-9eac-4f50-a8d7-7d5d23566176", + "request_id": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, - "content": " Here are the step-by-step workings:\n1) 212 degrees Fahrenheit \n2) To convert to Celsius, use the formula: C = (F - 32) * 5/9\n3) Plug in the values: C = (212 - 32) * 5/9 = 100 * 5/9 = 100 degrees Celsius\n\nSo, 212 degrees Fahrenheit converted to Celsius is", + "content": " Okay, here are the conversion steps:\n212 degrees Fahrenheit\n- Subtract 32 from 212 to get 180 (to convert from Fahrenheit to Celsius scale)\n- Multiply by 5/9 (because the formula is °C = (°F - 32) × 5/9)\n- 180 × 5/9 = 100\n\nSo 212 degrees Fahrenheit c", "role": "assistant", "completion_id": None, "sequence": 1, @@ -206,7 +206,7 @@ "transaction_id": None, "span_id": "span-id", "trace_id": "trace-id", - "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", + "request_id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", "response_id": None, # UUID that varies with each run "api_key_last_four_digits": "CRET", "duration": None, # Response time varies each test run @@ -226,7 +226,7 @@ "id": None, # UUID that varies with each run "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", + "request_id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, @@ -245,11 +245,11 @@ "id": None, # UUID that varies with each run "appName": "Python Agent Test (external_botocore)", "conversation_id": "my-awesome-id", - "request_id": "c5188fb5-dc58-4cbe-948d-af173c69ce0d", + "request_id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", "span_id": "span-id", "trace_id": "trace-id", "transaction_id": None, - "content": " To convert 212 degrees Fahrenheit to Celsius, we can use the conversion factor that Celsius is equal to (Fahrenheit - 32) x 5/9. 
\\n\\nApplying this formula, we have:\\n212°F = (212°F - 32) x 5/9\\n= (180) x 5/9\\n= 100°C.\\n\\nTherefore, 212 degrees F", + "content": " 212°F is equivalent to 100°C. \n\nFahrenheit and Celsius are two temperature scales commonly used in everyday life. The Fahrenheit scale is based on 32°F for the freezing point of water and 212°F for the boiling point of water. On the other hand, the C", "role": "assistant", "completion_id": None, "sequence": 1, @@ -264,7 +264,7 @@ chat_completion_expected_client_errors = { "amazon.titan-text-express-v1": { "conversation_id": "my-awesome-id", - "request.id": "041a580c-c3a4-4d99-aafc-00dc0698da5a", + "request.id": "15b39c8b-8e85-42c9-9623-06720301bda3", "api_key_last_four_digits": "-KEY", "request.model": "amazon.titan-text-express-v1", "request.temperature": 0.7, @@ -273,19 +273,45 @@ "ingest_source": "Python", "http.statusCode": 403, "error.message": "The security token included in the request is invalid.", - "error.code": "403", + "error.code": "UnrecognizedClientException", }, "ai21.j2-mid-v1": { "conversation_id": "my-awesome-id", - "request.id": "041a580c-c3a4-4d99-aafc-00dc0698da5a", + "request.id": "9021791d-3797-493d-9277-e33aa6f6d544", "api_key_last_four_digits": "-KEY", - "request.model": "amazon.titan-text-express-v1", + "request.model": "ai21.j2-mid-v1", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + "anthropic.claude-instant-v1": { + "conversation_id": "my-awesome-id", + "request.id": "37396f55-b721-4bae-9461-4c369f5a080d", + "api_key_last_four_digits": "-KEY", + "request.model": "anthropic.claude-instant-v1", "request.temperature": 0.7, "request.max_tokens": 100, "vendor": "Bedrock", "ingest_source": "Python", "http.statusCode": 403, "error.message": "The security token included in the request is 
invalid.", - "error.code": "403", - } + "error.code": "UnrecognizedClientException", + }, + "cohere.command-text-v14": { + "conversation_id": "my-awesome-id", + "request.id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "api_key_last_four_digits": "-KEY", + "request.model": "cohere.command-text-v14", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, } diff --git a/tests/external_botocore/_test_bedrock_embeddings.py b/tests/external_botocore/_test_bedrock_embeddings.py index fe4b4b839a..ddb35565b7 100644 --- a/tests/external_botocore/_test_bedrock_embeddings.py +++ b/tests/external_botocore/_test_bedrock_embeddings.py @@ -18,7 +18,7 @@ "duration": None, # Response time varies each test run "response.model": "amazon.titan-embed-text-v1", "request.model": "amazon.titan-embed-text-v1", - "request_id": "75f1d3fe-6cde-4cf5-bdaf-7101f746ccfe", + "request_id": "11233989-07e8-4ecb-9ba6-79601ba6d8cc", "response.usage.total_tokens": 6, "response.usage.prompt_tokens": 6, "vendor": "bedrock", @@ -40,7 +40,7 @@ "duration": None, # Response time varies each test run "response.model": "amazon.titan-embed-g1-text-02", "request.model": "amazon.titan-embed-g1-text-02", - "request_id": "f7e78265-6b7c-4b3a-b750-0c1d00347258", + "request_id": "b10ac895-eae3-4f07-b926-10b2866c55ed", "response.usage.total_tokens": 6, "response.usage.prompt_tokens": 6, "vendor": "bedrock", diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index b39a13c53e..e0e83329b2 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -98,13 +98,13 @@ def bedrock_server(): yield client # Run tests # Write responses to audit log - BEDROCK_AUDIT_LOG_CONTENTS = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: i[0])) + 
bedrock_audit_log_contents = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: i[0])) with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: - json.dump(BEDROCK_AUDIT_LOG_CONTENTS, fp=audit_log_fp, indent=4) + json.dump(bedrock_audit_log_contents, fp=audit_log_fp, indent=4) # Intercept outgoing requests and log to file for mocking -RECORDED_HEADERS = set(["x-amzn-requestid", "content-type"]) +RECORDED_HEADERS = set(["x-amzn-requestid", "x-amzn-errortype", "content-type"]) def wrap_botocore_endpoint_Endpoint__do_get_response(wrapped, instance, args, kwargs): diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 9a99f7a6a0..1ad87b0a4b 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -176,37 +176,36 @@ def test_bedrock_chat_completion_disabled_settings(set_trace_info, exercise_mode # No prompt provided -@dt_enabled -@reset_core_stats_engine() -# @validate_error_trace_attributes( -# callable_name(botocore.InvalidRequestError), -# exact_attrs={ -# "agent": {}, -# "intrinsic": {}, -# "user": { -# # "api_key_last_four_digits": "sk-CRET", -# # "request.temperature": 0.7, -# # "request.max_tokens": 100, -# # "vendor": "openAI", -# # "ingest_source": "Python", -# # "response.number_of_messages": 2, -# # "error.param": "engine", -# }, -# }, -# ) -# @validate_span_events( -# exact_agents={ -# # "error.message": "Must provide an 'engine' or 'model' parameter to create a ", -# } -# ) -def test_bedrock_chat_completion_error_no_prompt(exercise_model_no_prompt, set_trace_info): - @background_task() - def _test(): - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - exercise_model_no_prompt(temperature=0.7, max_tokens=100) - _test() +@validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + 
"conversation_id": "my-awesome-id", + "request.id": "f4908827-3db9-4742-9103-2bbc34578b03", + "api_key_last_four_digits": "CRET", + "request.model": "does-not-exist", + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", + }, + }, +) +@background_task() +def test_bedrock_chat_completion_error_invalid_model(bedrock_server, set_trace_info): + set_trace_info() + add_custom_attribute("conversation_id", "my-awesome-id") + with pytest.raises(_client_error): + bedrock_server.invoke_model( + body=b"{}", + modelId="does-not-exist", + accept="application/json", + contentType="application/json", + ) @dt_enabled diff --git a/tests/external_botocore/test_bedrock_chat_completion_error.py b/tests/external_botocore/test_bedrock_chat_completion_error.py deleted file mode 100644 index 209b5ea95b..0000000000 --- a/tests/external_botocore/test_bedrock_chat_completion_error.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import json -from io import BytesIO - -import botocore -import pytest -from _test_bedrock_chat_completion import ( - chat_completion_expected_events, - chat_completion_payload_templates, -) -from test_bedrock_chat_completion import ( - exercise_model, - is_file_payload, - model_id, -) -from testing_support.fixtures import ( - dt_enabled, - override_application_settings, - reset_core_stats_engine, -) -from testing_support.validators.validate_error_trace_attributes import ( - validate_error_trace_attributes, -) -from testing_support.validators.validate_span_events import validate_span_events - -from newrelic.api.background_task import background_task -from newrelic.api.time_trace import current_trace -from newrelic.api.transaction import add_custom_attribute, current_transaction -from newrelic.common.object_names import callable_name - -_test_bedrock_chat_completion_prompt = "What is 212 degrees Fahrenheit converted to Celsius?" - -chat_completion_payload_templates_no_prompt = { - "amazon.titan-text-express-v1": '{ "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', - "ai21.j2-mid-v1": '{"temperature": %f, "maxTokens": %d}', - "cohere.command-text-v14": '{"temperature": %f, "max_tokens": %d}', -} - - -@pytest.fixture(scope="function") -def exercise_model_no_prompt(bedrock_server, model_id, is_file_payload): - payload_template = chat_completion_payload_templates_no_prompt[model_id] - - def _exercise_model(temperature=0.7, max_tokens=100): - breakpoint() - body = (payload_template % (temperature, max_tokens)).encode("utf-8") - if is_file_payload: - body = BytesIO(body) - - bedrock_server.invoke_model( - body=body, - modelId=model_id, - accept="application/json", - contentType="application/json", - ) - - return _exercise_model - - -# No prompt provided -@dt_enabled -@reset_core_stats_engine() -# @validate_error_trace_attributes( -# callable_name(botocore.InvalidRequestError), -# exact_attrs={ -# "agent": {}, -# "intrinsic": {}, -# "user": { -# # 
"api_key_last_four_digits": "sk-CRET", -# # "request.temperature": 0.7, -# # "request.max_tokens": 100, -# # "vendor": "openAI", -# # "ingest_source": "Python", -# # "response.number_of_messages": 2, -# # "error.param": "engine", -# }, -# }, -# ) -# @validate_span_events( -# exact_agents={ -# # "error.message": "Must provide an 'engine' or 'model' parameter to create a ", -# } -# ) -def test_bedrock_chat_completion_no_prompt(exercise_model_no_prompt): - @background_task() - def _test(): - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - exercise_model_no_prompt(temperature=0.7, max_tokens=100) - - _test() - - -@dt_enabled -@reset_core_stats_engine() -@validate_error_trace_attributes( - callable_name(botocore.InvalidSignatureException), - exact_attrs={ - "agent": {}, - "intrinsic": {}, - "user": { - # "request.id": "b61f5406-5955-4dc9-915c-9ae1bedda182", # This will change - # "api_key_last_four_digits": "sk-CRET", - # "request.model": None, # Grab from payload templates - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 403, - "error.message": "The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. 
Consult the service documentation for details.", - "error.code": "InvalidSignatureException", - }, - }, -) -def test_bedrock_chat_completion_incorrect_secret_access_key(exercise_model): - @background_task() - def _test(): - with pytest.raises(botocore.InvalidSignatureException): # not sure where this exception actually comes from - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) - - _test() - - -# @reset_core_stats_engine() -# def test_bedrock_chat_completion_in_txn(exercise_model, expected_events): -# @background_task() -# def _test(): -# set_trace_info() -# add_custom_attribute("conversation_id", "my-awesome-id") -# exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) - -# _test() From b37668df57c0621b1afa9d3cbb4234639a8d486c Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Wed, 8 Nov 2023 11:53:56 -0800 Subject: [PATCH 12/16] Adding embedding error tracing --- newrelic/hooks/external_botocore.py | 12 +- .../_mock_external_bedrock_server.py | 112 +++++++++++------- .../_test_bedrock_chat_completion.py | 8 +- .../_test_bedrock_embeddings.py | 23 ++++ tests/external_botocore/conftest.py | 2 +- .../test_bedrock_chat_completion.py | 2 +- .../test_bedrock_embeddings.py | 46 ++++++- 7 files changed, 145 insertions(+), 60 deletions(-) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index a28082e4a8..618c3a32a6 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -64,7 +64,7 @@ def bedrock_error_attributes(exception, request_args, client, extractor): error_attributes = extractor(request_body)[1] error_attributes.update({ - "request.id": response.get("ResponseMetadata", "").get("RequestId", ""), + "request_id": response.get("ResponseMetadata", {}).get("RequestId", ""), "api_key_last_four_digits": 
client._request_signer._credentials.access_key[-4:], "request.model": request_args.get("modelId", ""), "vendor": "Bedrock", @@ -152,9 +152,11 @@ def extract_bedrock_titan_text_model(request_body, response_body=None): def extract_bedrock_titan_embedding_model(request_body, response_body=None): + if not response_body: + return [], {} # No extracted information necessary for embedding + request_body = json.loads(request_body) - if response_body: - response_body = json.loads(response_body) + response_body = json.loads(response_body) input_tokens = response_body.get("inputTextTokenCount", None) @@ -163,7 +165,7 @@ def extract_bedrock_titan_embedding_model(request_body, response_body=None): "response.usage.prompt_tokens": input_tokens, "response.usage.total_tokens": input_tokens, } - return embedding_dict + return [], embedding_dict def extract_bedrock_ai21_j2_model(request_body, response_body=None): @@ -332,7 +334,7 @@ def handle_embedding_event(client, transaction, extractor, model, response_body, request_id = response_headers.get("x-amzn-requestid", "") settings = transaction.settings if transaction.settings is not None else global_settings() - embedding_dict = extractor(request_body, response_body) + _, embedding_dict = extractor(request_body, response_body) embedding_dict.update({ "vendor": "bedrock", diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index 42e430a124..da5ff68dd9 100644 --- a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -29,51 +29,6 @@ # 3) This app runs on a separate thread meaning it won't block the test app. 
RESPONSES = { - "ai21.j2-mid-v1::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "9021791d-3797-493d-9277-e33aa6f6d544", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "amazon.titan-text-express-v1::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "15b39c8b-8e85-42c9-9623-06720301bda3", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "37396f55-b721-4bae-9461-4c369f5a080d", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "cohere.command-text-v14::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "22476490-a0d6-42db-b5ea-32d0b8a7f751", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "does-not-exist::": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "f4908827-3db9-4742-9103-2bbc34578b03", - "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", - }, - 400, - {"message": "The provided model identifier is invalid."}, - ], "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ {"Content-Type": "application/json", "x-amzn-RequestId": "c863d9fc-888b-421c-a175-ac5256baec62"}, 200, @@ -3377,9 +3332,71 @@ "prompt": "What is 212 degrees Fahrenheit converted to 
Celsius?", }, ], + "does-not-exist::": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "f4908827-3db9-4742-9103-2bbc34578b03", + "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", + }, + 400, + {"message": "The provided model identifier is invalid."}, + ], + "ai21.j2-mid-v1::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "9021791d-3797-493d-9277-e33aa6f6d544", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "amazon.titan-embed-g1-text-02::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "73328313-506e-4da8-af0f-51017fa6ca3f", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "amazon.titan-embed-text-v1::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "aece6ad7-e2ff-443b-a953-ba7d385fd0cc", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "amazon.titan-text-express-v1::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "15b39c8b-8e85-42c9-9623-06720301bda3", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "37396f55-b721-4bae-9461-4c369f5a080d", + "x-amzn-ErrorType": 
"UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "cohere.command-text-v14::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], } - MODEL_PATH_RE = re.compile(r"/model/([^/]+)/invoke") @@ -3435,6 +3452,9 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs): if __name__ == "__main__": + # Use this to sort dict for easier future incremental updates + print("RESPONSES = %s" % dict(sorted(RESPONSES.items(), key=lambda i: (i[1][1], i[0])))) + with MockExternalBedrockServer() as server: print("MockExternalBedrockServer serving on port %s" % str(server.port)) while True: diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py index bb4e479bdd..1a66d74e43 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion.py +++ b/tests/external_botocore/_test_bedrock_chat_completion.py @@ -264,7 +264,7 @@ chat_completion_expected_client_errors = { "amazon.titan-text-express-v1": { "conversation_id": "my-awesome-id", - "request.id": "15b39c8b-8e85-42c9-9623-06720301bda3", + "request_id": "15b39c8b-8e85-42c9-9623-06720301bda3", "api_key_last_four_digits": "-KEY", "request.model": "amazon.titan-text-express-v1", "request.temperature": 0.7, @@ -277,7 +277,7 @@ }, "ai21.j2-mid-v1": { "conversation_id": "my-awesome-id", - "request.id": "9021791d-3797-493d-9277-e33aa6f6d544", + "request_id": "9021791d-3797-493d-9277-e33aa6f6d544", "api_key_last_four_digits": "-KEY", "request.model": "ai21.j2-mid-v1", "request.temperature": 0.7, @@ -290,7 +290,7 @@ }, "anthropic.claude-instant-v1": { 
"conversation_id": "my-awesome-id", - "request.id": "37396f55-b721-4bae-9461-4c369f5a080d", + "request_id": "37396f55-b721-4bae-9461-4c369f5a080d", "api_key_last_four_digits": "-KEY", "request.model": "anthropic.claude-instant-v1", "request.temperature": 0.7, @@ -303,7 +303,7 @@ }, "cohere.command-text-v14": { "conversation_id": "my-awesome-id", - "request.id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", + "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", "api_key_last_four_digits": "-KEY", "request.model": "cohere.command-text-v14", "request.temperature": 0.7, diff --git a/tests/external_botocore/_test_bedrock_embeddings.py b/tests/external_botocore/_test_bedrock_embeddings.py index ddb35565b7..8fb2ceecee 100644 --- a/tests/external_botocore/_test_bedrock_embeddings.py +++ b/tests/external_botocore/_test_bedrock_embeddings.py @@ -49,3 +49,26 @@ ), ] } + +embedding_expected_client_errors = { + "amazon.titan-embed-text-v1": { + "request_id": "aece6ad7-e2ff-443b-a953-ba7d385fd0cc", + "api_key_last_four_digits": "-KEY", + "request.model": "amazon.titan-embed-text-v1", + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + "amazon.titan-embed-g1-text-02": { + "request_id": "73328313-506e-4da8-af0f-51017fa6ca3f", + "api_key_last_four_digits": "-KEY", + "request.model": "amazon.titan-embed-g1-text-02", + "vendor": "Bedrock", + "ingest_source": "Python", + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, +} diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index e0e83329b2..8b19d3ce75 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -98,7 +98,7 @@ def bedrock_server(): yield client # Run tests # Write responses to audit log - 
bedrock_audit_log_contents = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: i[0])) + bedrock_audit_log_contents = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: (i[1][1], i[0]))) with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: json.dump(bedrock_audit_log_contents, fp=audit_log_fp, indent=4) diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 1ad87b0a4b..9dcbbfdfab 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -184,7 +184,7 @@ def test_bedrock_chat_completion_disabled_settings(set_trace_info, exercise_mode "intrinsic": {}, "user": { "conversation_id": "my-awesome-id", - "request.id": "f4908827-3db9-4742-9103-2bbc34578b03", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", "api_key_last_four_digits": "CRET", "request.model": "does-not-exist", "vendor": "Bedrock", diff --git a/tests/external_botocore/test_bedrock_embeddings.py b/tests/external_botocore/test_bedrock_embeddings.py index 022eb07599..c374dd69c5 100644 --- a/tests/external_botocore/test_bedrock_embeddings.py +++ b/tests/external_botocore/test_bedrock_embeddings.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy +import botocore.exceptions + import json from io import BytesIO import pytest -from testing_support.fixtures import ( # override_application_settings, +from testing_support.fixtures import ( + dt_enabled, override_application_settings, reset_core_stats_engine, ) @@ -26,10 +28,17 @@ from testing_support.validators.validate_transaction_metrics import ( validate_transaction_metrics, ) +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_error_trace_attributes import ( + validate_error_trace_attributes, +) from newrelic.api.background_task import background_task -from _test_bedrock_embeddings import embedding_expected_events, embedding_payload_templates +from _test_bedrock_embeddings import embedding_expected_events, embedding_payload_templates, embedding_expected_client_errors + +from newrelic.common.object_names import callable_name + disabled_ml_insights_settings = {"ml_insights_events.enabled": False} @@ -76,6 +85,11 @@ def expected_events(model_id): return embedding_expected_events[model_id] +@pytest.fixture(scope="module") +def expected_client_error(model_id): + return embedding_expected_client_errors[model_id] + + @reset_core_stats_engine() def test_bedrock_embedding(set_trace_info, exercise_model, expected_events): @validate_ml_events(expected_events) @@ -101,6 +115,10 @@ def test_bedrock_embedding_outside_txn(exercise_model): exercise_model(prompt="This is an embedding test.") +_client_error = botocore.exceptions.ClientError +_client_error_name = callable_name(_client_error) + + @override_application_settings(disabled_ml_insights_settings) @reset_core_stats_engine() @validate_ml_event_count(count=0) @@ -115,3 +133,25 @@ def test_bedrock_embedding_outside_txn(exercise_model): def test_bedrock_embedding_disabled_settings(set_trace_info, exercise_model): set_trace_info() exercise_model(prompt="This is an embedding test.") + + +@dt_enabled +@reset_core_stats_engine() +def 
test_bedrock_embedding_error_incorrect_access_key(monkeypatch, bedrock_server, exercise_model, set_trace_info, expected_client_error): + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": expected_client_error, + }, + ) + @background_task() + def _test(): + monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): # not sure where this exception actually comes from + set_trace_info() + exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100) + + _test() From 1eaca3766868493645fc9810ad5eff2ce21a03db Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Wed, 8 Nov 2023 12:36:34 -0800 Subject: [PATCH 13/16] Delete comment --- tests/external_botocore/test_bedrock_chat_completion.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py index 9dcbbfdfab..a1eb881cc1 100644 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ b/tests/external_botocore/test_bedrock_chat_completion.py @@ -175,8 +175,6 @@ def test_bedrock_chat_completion_disabled_settings(set_trace_info, exercise_mode _client_error_name = callable_name(_client_error) -# No prompt provided - @validate_error_trace_attributes( "botocore.errorfactory:ValidationException", exact_attrs={ From a976e75888d44c46f83eb7be0cffc4f987d51741 Mon Sep 17 00:00:00 2001 From: Tim Pansino Date: Wed, 8 Nov 2023 15:31:29 -0800 Subject: [PATCH 14/16] Update moto --- tests/external_botocore/test_botocore_dynamodb.py | 2 +- tox.ini | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/external_botocore/test_botocore_dynamodb.py b/tests/external_botocore/test_botocore_dynamodb.py index 6ce9f12c33..932fb1743a 100644 --- a/tests/external_botocore/test_botocore_dynamodb.py +++ b/tests/external_botocore/test_botocore_dynamodb.py @@ -80,7 
+80,7 @@ background_task=True, ) @background_task() -@moto.mock_dynamodb2 +@moto.mock_dynamodb def test_dynamodb(): session = botocore.session.get_session() client = session.create_client( diff --git a/tox.ini b/tox.ini index 720c301dcd..bb6102d895 100644 --- a/tox.ini +++ b/tox.ini @@ -252,12 +252,11 @@ deps = datastore_redis-redis0400: redis<4.1 external_botocore-botocorelatest: botocore external_botocore-botocorelatest: boto3 - external_botocore-botocorelatest: moto external_botocore-botocore128: botocore<1.29 external_botocore-botocore0125: botocore<1.26 - external_botocore-{py37,py38,py39,py310,py311}: moto[awslambda,ec2,iam]<3.0 + external_botocore-{py37,py38,py39,py310,py311}: moto[awslambda,ec2,iam,sqs] external_botocore-py27: rsa<4.7.1 - external_botocore-py27: moto[awslambda,ec2,iam]<2.0 + external_botocore-py27: moto[awslambda,ec2,iam,sqs]<2.0 external_feedparser-feedparser05: feedparser<6 external_feedparser-feedparser06: feedparser<7 external_httplib2: httplib2<1.0 From 45820120a035fbb5dc6c047aaff06c72021449b1 Mon Sep 17 00:00:00 2001 From: Hannah Stepanek Date: Fri, 10 Nov 2023 18:13:30 -0800 Subject: [PATCH 15/16] Fix botocore tests & re-structure --- newrelic/hooks/external_botocore.py | 366 +- .../_mock_external_bedrock_server.py | 3461 ----------------- .../_test_bedrock_chat_completion.py | 317 -- .../_test_bedrock_embeddings.py | 74 - tests/external_botocore/conftest.py | 134 +- .../test_bedrock_chat_completion.py | 229 -- .../test_bedrock_embeddings.py | 157 - 7 files changed, 5 insertions(+), 4733 deletions(-) delete mode 100644 tests/external_botocore/_mock_external_bedrock_server.py delete mode 100644 tests/external_botocore/_test_bedrock_chat_completion.py delete mode 100644 tests/external_botocore/_test_bedrock_embeddings.py delete mode 100644 tests/external_botocore/test_bedrock_chat_completion.py delete mode 100644 tests/external_botocore/test_bedrock_embeddings.py diff --git a/newrelic/hooks/external_botocore.py 
b/newrelic/hooks/external_botocore.py index 618c3a32a6..30c5d77088 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -12,25 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json -import logging -import uuid -from io import BytesIO - -from botocore.response import StreamingBody - from newrelic.api.datastore_trace import datastore_trace from newrelic.api.external_trace import ExternalTrace -from newrelic.api.function_trace import FunctionTrace from newrelic.api.message_trace import message_trace -from newrelic.api.time_trace import get_trace_linking_metadata -from newrelic.api.transaction import current_transaction -from newrelic.common.object_names import callable_name -from newrelic.common.object_wrapper import function_wrapper, wrap_function_wrapper -from newrelic.core.config import global_settings - -_logger = logging.getLogger(__name__) -UNSUPPORTED_MODEL_WARNING_SENT = False +from newrelic.common.object_wrapper import wrap_function_wrapper def extract_sqs(*args, **kwargs): @@ -55,353 +40,6 @@ def extractor_string(*args, **kwargs): return extractor_list -def bedrock_error_attributes(exception, request_args, client, extractor): - response = getattr(exception, "response", None) - if not response: - return {} - - request_body = request_args.get("body", "") - error_attributes = extractor(request_body)[1] - - error_attributes.update({ - "request_id": response.get("ResponseMetadata", {}).get("RequestId", ""), - "api_key_last_four_digits": client._request_signer._credentials.access_key[-4:], - "request.model": request_args.get("modelId", ""), - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": response.get("ResponseMetadata", "").get("HTTPStatusCode", ""), - "error.message": response.get("Error", "").get("Message", ""), - "error.code": response.get("Error", "").get("Code", ""), - }) - return error_attributes - - -def 
create_chat_completion_message_event( - transaction, - app_name, - message_list, - chat_completion_id, - span_id, - trace_id, - request_model, - request_id, - conversation_id, - response_id="", -): - if not transaction: - return - - for index, message in enumerate(message_list): - if response_id: - id_ = "%s-%d" % (response_id, index) # Response ID was set, append message index to it. - else: - id_ = str(uuid.uuid4()) # No response IDs, use random UUID - - chat_completion_message_dict = { - "id": id_, - "appName": app_name, - "conversation_id": conversation_id, - "request_id": request_id, - "span_id": span_id, - "trace_id": trace_id, - "transaction_id": transaction._transaction_id, - "content": message.get("content", ""), - "role": message.get("role"), - "completion_id": chat_completion_id, - "sequence": index, - "response.model": request_model, - "vendor": "bedrock", - "ingest_source": "Python", - } - transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict) - - -def extract_bedrock_titan_text_model(request_body, response_body=None): - request_body = json.loads(request_body) - if response_body: - response_body = json.loads(response_body) - - request_config = request_body.get("textGenerationConfig", {}) - - chat_completion_summary_dict = { - "request.max_tokens": request_config.get("maxTokenCount", ""), - "request.temperature": request_config.get("temperature", ""), - } - - if response_body: - input_tokens = response_body["inputTextTokenCount"] - completion_tokens = sum(result["tokenCount"] for result in response_body.get("results", [])) - total_tokens = input_tokens + completion_tokens - - message_list = [{"role": "user", "content": request_body.get("inputText", "")}] - message_list.extend( - {"role": "assistant", "content": result["outputText"]} for result in response_body.get("results", []) - ) - - chat_completion_summary_dict.update({ - "response.choices.finish_reason": response_body["results"][0]["completionReason"], - 
"response.usage.completion_tokens": completion_tokens, - "response.usage.prompt_tokens": input_tokens, - "response.usage.total_tokens": total_tokens, - "response.number_of_messages": len(message_list), - }) - else: - message_list = [] - - return message_list, chat_completion_summary_dict - - -def extract_bedrock_titan_embedding_model(request_body, response_body=None): - if not response_body: - return [], {} # No extracted information necessary for embedding - - request_body = json.loads(request_body) - response_body = json.loads(response_body) - - input_tokens = response_body.get("inputTextTokenCount", None) - - embedding_dict = { - "input": request_body.get("inputText", ""), - "response.usage.prompt_tokens": input_tokens, - "response.usage.total_tokens": input_tokens, - } - return [], embedding_dict - - -def extract_bedrock_ai21_j2_model(request_body, response_body=None): - request_body = json.loads(request_body) - if response_body: - response_body = json.loads(response_body) - - chat_completion_summary_dict = { - "request.max_tokens": request_body.get("maxTokens", ""), - "request.temperature": request_body.get("temperature", ""), - } - - if response_body: - message_list = [{"role": "user", "content": request_body.get("prompt", "")}] - message_list.extend( - {"role": "assistant", "content": result["data"]["text"]} for result in response_body.get("completions", []) - ) - - chat_completion_summary_dict.update({ - "response.choices.finish_reason": response_body["completions"][0]["finishReason"]["reason"], - "response.number_of_messages": len(message_list), - "response_id": str(response_body.get("id", "")), - }) - else: - message_list = [] - - return message_list, chat_completion_summary_dict - - -def extract_bedrock_claude_model(request_body, response_body=None): - request_body = json.loads(request_body) - if response_body: - response_body = json.loads(response_body) - - chat_completion_summary_dict = { - "request.max_tokens": request_body.get("max_tokens_to_sample", 
""), - "request.temperature": request_body.get("temperature", ""), - } - - if response_body: - message_list = [ - {"role": "user", "content": request_body.get("prompt", "")}, - {"role": "assistant", "content": response_body.get("completion", "")}, - ] - - chat_completion_summary_dict.update({ - "response.choices.finish_reason": response_body.get("stop_reason", ""), - "response.number_of_messages": len(message_list), - }) - else: - message_list = [] - - return message_list, chat_completion_summary_dict - - -def extract_bedrock_cohere_model(request_body, response_body=None): - request_body = json.loads(request_body) - if response_body: - response_body = json.loads(response_body) - - chat_completion_summary_dict = { - "request.max_tokens": request_body.get("max_tokens", ""), - "request.temperature": request_body.get("temperature", ""), - } - - if response_body: - message_list = [{"role": "user", "content": request_body.get("prompt", "")}] - message_list.extend( - {"role": "assistant", "content": result["text"]} for result in response_body.get("generations", []) - ) - - chat_completion_summary_dict.update({ - "request.max_tokens": request_body.get("max_tokens", ""), - "request.temperature": request_body.get("temperature", ""), - "response.choices.finish_reason": response_body["generations"][0]["finish_reason"], - "response.number_of_messages": len(message_list), - "response_id": str(response_body.get("id", "")), - }) - else: - message_list = [] - - return message_list, chat_completion_summary_dict - - -MODEL_EXTRACTORS = [ # Order is important here, avoiding dictionaries - ("amazon.titan-embed", extract_bedrock_titan_embedding_model), - ("amazon.titan", extract_bedrock_titan_text_model), - ("ai21.j2", extract_bedrock_ai21_j2_model), - ("cohere", extract_bedrock_cohere_model), - ("anthropic.claude", extract_bedrock_claude_model), -] - - -@function_wrapper -def wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): - # Wrapped function only takes keyword 
arguments, no need for binding - - transaction = current_transaction() - - if not transaction: - return wrapped(*args, **kwargs) - - # Read and replace request file stream bodies - request_body = kwargs["body"] - if hasattr(request_body, "read"): - request_body = request_body.read() - kwargs["body"] = request_body - - # Determine model to be used with extractor - model = kwargs.get("modelId") - if not model: - return wrapped(*args, **kwargs) - - # Determine extractor by model type - for extractor_name, extractor in MODEL_EXTRACTORS: - if model.startswith(extractor_name): - break - else: - # Model was not found in extractor list - global UNSUPPORTED_MODEL_WARNING_SENT - if not UNSUPPORTED_MODEL_WARNING_SENT: - # Only send warning once to avoid spam - _logger.warning( - "Unsupported Amazon Bedrock model in use (%s). Upgrade to a newer version of the agent, and contact New Relic support if the issue persists.", - model, - ) - UNSUPPORTED_MODEL_WARNING_SENT = True - - extractor = lambda *args: ([], {}) # Empty extractor that returns nothing - - ft_name = callable_name(wrapped) - with FunctionTrace(ft_name) as ft: - try: - response = wrapped(*args, **kwargs) - except Exception as exc: - try: - error_attributes = extractor(request_body) - error_attributes = bedrock_error_attributes(exc, kwargs, instance, extractor) - ft.notice_error( - attributes=error_attributes, - ) - finally: - raise - - if not response: - return response - - # Read and replace response streaming bodies - response_body = response["body"].read() - response["body"] = StreamingBody(BytesIO(response_body), len(response_body)) - response_headers = response["ResponseMetadata"]["HTTPHeaders"] - - if model.startswith("amazon.titan-embed"): # Only available embedding models - handle_embedding_event(instance, transaction, extractor, model, response_body, response_headers, request_body, ft.duration) - else: - handle_chat_completion_event(instance, transaction, extractor, model, response_body, response_headers, 
request_body, ft.duration) - - return response - -def handle_embedding_event(client, transaction, extractor, model, response_body, response_headers, request_body, duration): - embedding_id = str(uuid.uuid4()) - available_metadata = get_trace_linking_metadata() - span_id = available_metadata.get("span.id", "") - trace_id = available_metadata.get("trace.id", "") - - request_id = response_headers.get("x-amzn-requestid", "") - settings = transaction.settings if transaction.settings is not None else global_settings() - - _, embedding_dict = extractor(request_body, response_body) - - embedding_dict.update({ - "vendor": "bedrock", - "ingest_source": "Python", - "id": embedding_id, - "appName": settings.app_name, - "span_id": span_id, - "trace_id": trace_id, - "request_id": request_id, - "transaction_id": transaction._transaction_id, - "api_key_last_four_digits": client._request_signer._credentials.access_key[-4:], - "duration": duration, - "request.model": model, - "response.model": model, - }) - - transaction.record_ml_event("LlmEmbedding", embedding_dict) - - -def handle_chat_completion_event(client, transaction, extractor, model, response_body, response_headers, request_body, duration): - custom_attrs_dict = transaction._custom_params - conversation_id = custom_attrs_dict.get("conversation_id", "") - - chat_completion_id = str(uuid.uuid4()) - available_metadata = get_trace_linking_metadata() - span_id = available_metadata.get("span.id", "") - trace_id = available_metadata.get("trace.id", "") - - request_id = response_headers.get("x-amzn-requestid", "") - settings = transaction.settings if transaction.settings is not None else global_settings() - - message_list, chat_completion_summary_dict = extractor(request_body, response_body) - response_id = chat_completion_summary_dict.get("response_id", "") - chat_completion_summary_dict.update( - { - "vendor": "bedrock", - "ingest_source": "Python", - "api_key_last_four_digits": 
client._request_signer._credentials.access_key[-4:], - "id": chat_completion_id, - "appName": settings.app_name, - "conversation_id": conversation_id, - "span_id": span_id, - "trace_id": trace_id, - "transaction_id": transaction._transaction_id, - "request_id": request_id, - "duration": duration, - "request.model": model, - "response.model": model, # Duplicate data required by the UI - } - ) - - transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict) - - create_chat_completion_message_event( - transaction=transaction, - app_name=settings.app_name, - message_list=message_list, - chat_completion_id=chat_completion_id, - span_id=span_id, - trace_id=trace_id, - request_model=model, - request_id=request_id, - conversation_id=conversation_id, - response_id=response_id, - ) - - CUSTOM_TRACE_POINTS = { ("sns", "publish"): message_trace("SNS", "Produce", "Topic", extract(("TopicArn", "TargetArn"), "PhoneNumber")), ("dynamodb", "put_item"): datastore_trace("DynamoDB", extract("TableName"), "put_item"), @@ -415,7 +53,6 @@ def handle_chat_completion_event(client, transaction, extractor, model, response ("sqs", "send_message"): message_trace("SQS", "Produce", "Queue", extract_sqs), ("sqs", "send_message_batch"): message_trace("SQS", "Produce", "Queue", extract_sqs), ("sqs", "receive_message"): message_trace("SQS", "Consume", "Queue", extract_sqs), - ("bedrock-runtime", "invoke_model"): wrap_bedrock_runtime_invoke_model, } @@ -447,6 +84,7 @@ def _nr_endpoint_make_request_(wrapped, instance, args, kwargs): method = request_dict.get("method", None) with ExternalTrace(library="botocore", url=url, method=method, source=wrapped) as trace: + try: trace._add_agent_attribute("aws.operation", operation_model.name) except: diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py deleted file mode 100644 index da5ff68dd9..0000000000 --- 
a/tests/external_botocore/_mock_external_bedrock_server.py +++ /dev/null @@ -1,3461 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import re - -from testing_support.mock_external_http_server import MockExternalHTTPServer - -# This defines an external server test apps can make requests to instead of -# the real Bedrock backend. This provides 3 features: -# -# 1) This removes dependencies on external websites. -# 2) Provides a better mechanism for making an external call in a test app than -# simple calling another endpoint the test app makes available because this -# server will not be instrumented meaning we don't have to sort through -# transactions to separate the ones created in the test app and the ones -# created by an external call. -# 3) This app runs on a separate thread meaning it won't block the test app. 
- -RESPONSES = { - "ai21.j2-mid-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "c863d9fc-888b-421c-a175-ac5256baec62"}, - 200, - { - "id": 1234, - "prompt": { - "text": "What is 212 degrees Fahrenheit converted to Celsius?", - "tokens": [ - { - "generatedToken": { - "token": "▁What▁is", - "logprob": -7.446773529052734, - "raw_logprob": -7.446773529052734, - }, - "topTokens": None, - "textRange": {"start": 0, "end": 7}, - }, - { - "generatedToken": { - "token": "▁", - "logprob": -3.8046724796295166, - "raw_logprob": -3.8046724796295166, - }, - "topTokens": None, - "textRange": {"start": 7, "end": 8}, - }, - { - "generatedToken": { - "token": "212", - "logprob": -9.287349700927734, - "raw_logprob": -9.287349700927734, - }, - "topTokens": None, - "textRange": {"start": 8, "end": 11}, - }, - { - "generatedToken": { - "token": "▁degrees▁Fahrenheit", - "logprob": -7.953181743621826, - "raw_logprob": -7.953181743621826, - }, - "topTokens": None, - "textRange": {"start": 11, "end": 30}, - }, - { - "generatedToken": { - "token": "▁converted▁to", - "logprob": -6.168096542358398, - "raw_logprob": -6.168096542358398, - }, - "topTokens": None, - "textRange": {"start": 30, "end": 43}, - }, - { - "generatedToken": { - "token": "▁Celsius", - "logprob": -0.09790332615375519, - "raw_logprob": -0.09790332615375519, - }, - "topTokens": None, - "textRange": {"start": 43, "end": 51}, - }, - { - "generatedToken": { - "token": "?", - "logprob": -6.5795369148254395, - "raw_logprob": -6.5795369148254395, - }, - "topTokens": None, - "textRange": {"start": 51, "end": 52}, - }, - ], - }, - "completions": [ - { - "data": { - "text": "\n212 degrees Fahrenheit is equal to 100 degrees Celsius.", - "tokens": [ - { - "generatedToken": { - "token": "<|newline|>", - "logprob": -1.6689286894688848e-06, - "raw_logprob": -0.00015984688070602715, - }, - "topTokens": None, - "textRange": {"start": 0, "end": 1}, - }, - { - 
"generatedToken": { - "token": "▁", - "logprob": -0.03473362699151039, - "raw_logprob": -0.11261807382106781, - }, - "topTokens": None, - "textRange": {"start": 1, "end": 1}, - }, - { - "generatedToken": { - "token": "212", - "logprob": -0.003316262038424611, - "raw_logprob": -0.019686665385961533, - }, - "topTokens": None, - "textRange": {"start": 1, "end": 4}, - }, - { - "generatedToken": { - "token": "▁degrees▁Fahrenheit", - "logprob": -0.003579758107662201, - "raw_logprob": -0.03144374489784241, - }, - "topTokens": None, - "textRange": {"start": 4, "end": 23}, - }, - { - "generatedToken": { - "token": "▁is▁equal▁to", - "logprob": -0.0027733694296330214, - "raw_logprob": -0.027207009494304657, - }, - "topTokens": None, - "textRange": {"start": 23, "end": 35}, - }, - { - "generatedToken": { - "token": "▁", - "logprob": -0.0003392120997887105, - "raw_logprob": -0.005458095110952854, - }, - "topTokens": None, - "textRange": {"start": 35, "end": 36}, - }, - { - "generatedToken": { - "token": "100", - "logprob": -2.145764938177308e-06, - "raw_logprob": -0.00012730741582345217, - }, - "topTokens": None, - "textRange": {"start": 36, "end": 39}, - }, - { - "generatedToken": { - "token": "▁degrees▁Celsius", - "logprob": -0.31207239627838135, - "raw_logprob": -0.402545303106308, - }, - "topTokens": None, - "textRange": {"start": 39, "end": 55}, - }, - { - "generatedToken": { - "token": ".", - "logprob": -0.023684674873948097, - "raw_logprob": -0.0769972875714302, - }, - "topTokens": None, - "textRange": {"start": 55, "end": 56}, - }, - { - "generatedToken": { - "token": "<|endoftext|>", - "logprob": -0.0073706600815057755, - "raw_logprob": -0.06265579164028168, - }, - "topTokens": None, - "textRange": {"start": 56, "end": 56}, - }, - ], - }, - "finishReason": {"reason": "endoftext"}, - } - ], - }, - ], - "amazon.titan-embed-g1-text-02::This is an embedding test.": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "b10ac895-eae3-4f07-b926-10b2866c55ed"}, - 200, 
- { - "embedding": [ - -0.14160156, - 0.034423828, - 0.54296875, - 0.10986328, - 0.053466797, - 0.3515625, - 0.12988281, - -0.0002708435, - -0.21484375, - 0.060302734, - 0.58984375, - -0.5859375, - 0.52734375, - 0.82421875, - -0.91015625, - -0.19628906, - 0.45703125, - 0.609375, - -0.67578125, - 0.39453125, - -0.46875, - -0.25390625, - -0.21191406, - 0.114746094, - 0.31640625, - -0.41015625, - -0.32617188, - -0.43554688, - 0.4765625, - -0.4921875, - 0.40429688, - 0.06542969, - 0.859375, - -0.008056641, - -0.19921875, - 0.072753906, - 0.33203125, - 0.69921875, - 0.39453125, - 0.15527344, - 0.08886719, - -0.25, - 0.859375, - 0.22949219, - -0.19042969, - 0.13769531, - -0.078125, - 0.41210938, - 0.875, - 0.5234375, - 0.59765625, - -0.22949219, - -0.22558594, - -0.47460938, - 0.37695312, - 0.51953125, - -0.5703125, - 0.46679688, - 0.43554688, - 0.17480469, - -0.080566406, - -0.16699219, - -0.734375, - -1.0625, - -0.33984375, - 0.390625, - -0.18847656, - -0.5234375, - -0.48828125, - 0.44921875, - -0.09814453, - -0.3359375, - 0.087402344, - 0.36914062, - 1.3203125, - 0.25585938, - 0.14746094, - -0.059570312, - -0.15820312, - -0.037353516, - -0.61328125, - -0.6484375, - -0.35351562, - 0.55078125, - -0.26953125, - 0.90234375, - 0.3671875, - 0.31054688, - 0.00014019012, - -0.171875, - 0.025512695, - 0.5078125, - 0.11621094, - 0.33203125, - 0.8125, - -0.3046875, - -1.078125, - -0.5703125, - 0.26171875, - -0.4609375, - 0.203125, - 0.44726562, - -0.5078125, - 0.41601562, - -0.1953125, - 0.028930664, - -0.57421875, - 0.2265625, - 0.13574219, - -0.040039062, - -0.22949219, - -0.515625, - -0.19042969, - -0.30078125, - 0.10058594, - -0.66796875, - 0.6015625, - 0.296875, - -0.765625, - -0.87109375, - 0.2265625, - 0.068847656, - -0.088378906, - -0.1328125, - -0.796875, - -0.37304688, - 0.47460938, - -0.3515625, - -0.8125, - -0.32226562, - 0.265625, - 0.3203125, - -0.4140625, - -0.49023438, - 0.859375, - -0.19140625, - -0.6328125, - 0.10546875, - -0.5625, - 0.66015625, - 0.26171875, - 
-0.2109375, - 0.421875, - -0.82421875, - 0.29296875, - 0.17773438, - 0.24023438, - 0.5078125, - -0.49804688, - -0.10205078, - 0.10498047, - -0.36132812, - -0.47460938, - -0.20996094, - 0.010070801, - -0.546875, - 0.66796875, - -0.123046875, - -0.75390625, - 0.19628906, - 0.17480469, - 0.18261719, - -0.96875, - -0.26171875, - 0.4921875, - -0.40039062, - 0.296875, - 0.1640625, - -0.20507812, - -0.36132812, - 0.76171875, - -1.234375, - -0.625, - 0.060058594, - -0.09375, - -0.14746094, - 1.09375, - 0.057861328, - 0.22460938, - -0.703125, - 0.07470703, - 0.23828125, - -0.083984375, - -0.54296875, - 0.5546875, - -0.5, - -0.390625, - 0.106933594, - 0.6640625, - 0.27734375, - -0.953125, - 0.35351562, - -0.7734375, - -0.77734375, - 0.16503906, - -0.42382812, - 0.36914062, - 0.020141602, - -1.3515625, - 0.18847656, - 0.13476562, - -0.034179688, - -0.03930664, - -0.03857422, - -0.027954102, - 0.73828125, - -0.18945312, - -0.09814453, - -0.46289062, - 0.36914062, - 0.033203125, - 0.020874023, - -0.703125, - 0.91796875, - 0.38671875, - 0.625, - -0.19335938, - -0.16796875, - -0.58203125, - 0.21386719, - -0.032470703, - -0.296875, - -0.15625, - -0.1640625, - -0.74609375, - 0.328125, - 0.5546875, - -0.1953125, - 1.0546875, - 0.171875, - -0.099609375, - 0.5234375, - 0.05078125, - -0.35742188, - -0.2734375, - -1.3203125, - -0.8515625, - -0.16015625, - 0.01574707, - 0.29296875, - 0.18457031, - -0.265625, - 0.048339844, - 0.045654297, - -0.32226562, - 0.087890625, - -0.0047302246, - 0.38671875, - 0.10644531, - -0.06225586, - 1.03125, - 0.94140625, - -0.3203125, - 0.20800781, - -1.171875, - 0.48046875, - -0.091796875, - 0.20800781, - -0.1328125, - -0.20507812, - 0.28125, - -0.47070312, - -0.09033203, - 0.0013809204, - -0.08203125, - 0.43359375, - -0.03100586, - -0.060791016, - -0.53515625, - -1.46875, - 0.000101566315, - 0.515625, - 0.40625, - -0.10498047, - -0.15820312, - -0.009460449, - -0.77734375, - -0.5859375, - 0.9765625, - 0.099609375, - 0.51953125, - 0.38085938, - -0.09667969, 
- -0.100097656, - -0.5, - -1.3125, - -0.18066406, - -0.099121094, - 0.26171875, - -0.14453125, - -0.546875, - 0.17578125, - 0.484375, - 0.765625, - 0.45703125, - 0.2734375, - 0.0028076172, - 0.17089844, - -0.32421875, - -0.37695312, - 0.30664062, - -0.48046875, - 0.07128906, - 0.031982422, - -0.31054688, - -0.055419922, - -0.29296875, - 0.3359375, - -0.296875, - 0.47851562, - -0.05126953, - 0.18457031, - -0.01953125, - -0.35742188, - 0.017944336, - -0.25, - 0.10595703, - 0.17382812, - -0.73828125, - 0.36914062, - -0.15234375, - -0.8125, - 0.17382812, - 0.048095703, - 0.5625, - -0.33789062, - 0.023071289, - -0.21972656, - 0.16015625, - 0.032958984, - -1.1171875, - -0.984375, - 0.83984375, - 0.009033203, - -0.042236328, - -0.46484375, - -0.08203125, - 0.44726562, - -0.765625, - -0.3984375, - -0.40820312, - -0.234375, - 0.044189453, - 0.119628906, - -0.7578125, - -0.55078125, - -0.4453125, - 0.7578125, - 0.34960938, - 0.96484375, - 0.35742188, - 0.36914062, - -0.35351562, - -0.36132812, - 1.109375, - 0.5859375, - 0.85546875, - -0.10644531, - -0.6953125, - -0.0066833496, - 0.042236328, - -0.06689453, - 0.36914062, - 0.9765625, - -0.3046875, - 0.59765625, - -0.6640625, - 0.21484375, - -0.07128906, - 1.1328125, - -0.51953125, - 0.86328125, - -0.11328125, - 0.15722656, - -0.36328125, - -0.04638672, - 1.4375, - 0.18457031, - -0.18359375, - 0.10595703, - -0.49023438, - -0.07324219, - -0.73046875, - -0.119140625, - 0.021118164, - 0.4921875, - -0.46875, - 0.28710938, - 0.3359375, - 0.11767578, - -0.2109375, - -0.14550781, - 0.39648438, - -0.27734375, - 0.48046875, - 0.12988281, - 0.45507812, - -0.375, - -0.84765625, - 0.25585938, - -0.36523438, - 0.8046875, - 0.42382812, - -0.24511719, - 0.54296875, - 0.71875, - 0.010009766, - -0.04296875, - 0.083984375, - -0.52734375, - 0.13964844, - -0.27539062, - -0.30273438, - 1.1484375, - -0.515625, - -0.19335938, - 0.58984375, - 0.049072266, - 0.703125, - -0.04272461, - 0.5078125, - 0.34960938, - -0.3359375, - -0.47460938, - 
0.049316406, - 0.36523438, - 0.7578125, - -0.022827148, - -0.71484375, - 0.21972656, - 0.09716797, - -0.203125, - -0.36914062, - 1.34375, - 0.34179688, - 0.46679688, - 1.078125, - 0.26171875, - 0.41992188, - 0.22363281, - -0.515625, - -0.5703125, - 0.13378906, - 0.26757812, - -0.22558594, - -0.5234375, - 0.06689453, - 0.08251953, - -0.625, - 0.16796875, - 0.43164062, - -0.55859375, - 0.28125, - 0.078125, - 0.6328125, - 0.23242188, - -0.064941406, - -0.004486084, - -0.20703125, - 0.2734375, - 0.453125, - -0.734375, - 0.04272461, - 0.36132812, - -0.19628906, - -0.12402344, - 1.3515625, - 0.25585938, - 0.4921875, - -0.29296875, - -0.58984375, - 0.021240234, - -0.044677734, - 0.7578125, - -0.7890625, - 0.10253906, - -0.15820312, - -0.5078125, - -0.39453125, - -0.453125, - 0.35742188, - 0.921875, - 0.44335938, - -0.49804688, - 0.44335938, - 0.31445312, - 0.58984375, - -1.0078125, - -0.22460938, - 0.24121094, - 0.87890625, - 0.66015625, - -0.390625, - -0.05053711, - 0.059570312, - 0.36132812, - -0.00038719177, - -0.017089844, - 0.62890625, - 0.203125, - 0.17480469, - 0.025512695, - 0.47460938, - 0.3125, - 1.140625, - 0.32421875, - -0.057861328, - 0.36914062, - -0.7265625, - -0.51953125, - 0.26953125, - 0.42773438, - 0.064453125, - 0.6328125, - 0.27148438, - -0.11767578, - 0.66796875, - -0.38671875, - 0.5234375, - -0.59375, - 0.5078125, - 0.008239746, - -0.34179688, - -0.27539062, - 0.5234375, - 1.296875, - 0.29492188, - -0.010986328, - -0.41210938, - 0.59375, - 0.061767578, - -0.33398438, - -2.03125, - 0.87890625, - -0.010620117, - 0.53125, - 0.14257812, - -0.515625, - -1.03125, - 0.578125, - 0.1875, - 0.44335938, - -0.33203125, - -0.36328125, - -0.3203125, - 0.29296875, - -0.8203125, - 0.41015625, - -0.48242188, - 0.66015625, - 0.5625, - -0.16503906, - -0.54296875, - -0.38085938, - 0.26171875, - 0.62109375, - 0.29101562, - -0.31054688, - 0.23730469, - -0.8515625, - 0.5234375, - 0.15332031, - 0.52734375, - -0.079589844, - -0.080566406, - -0.15527344, - -0.022827148, - 
0.030517578, - -0.1640625, - -0.421875, - 0.09716797, - 0.03930664, - -0.055908203, - -0.546875, - -0.47851562, - 0.091796875, - 0.32226562, - -0.94140625, - -0.04638672, - -1.203125, - -0.39648438, - 0.45507812, - 0.296875, - -0.45703125, - 0.37890625, - -0.122558594, - 0.28320312, - -0.01965332, - -0.11669922, - -0.34570312, - -0.53515625, - -0.091308594, - -0.9375, - -0.32617188, - 0.095214844, - -0.4765625, - 0.37890625, - -0.859375, - 1.1015625, - -0.08935547, - 0.46484375, - -0.19238281, - 0.7109375, - 0.040039062, - -0.5390625, - 0.22363281, - -0.70703125, - 0.4921875, - -0.119140625, - -0.26757812, - -0.08496094, - 0.0859375, - -0.00390625, - -0.013366699, - -0.03955078, - 0.07421875, - -0.13085938, - 0.29101562, - -0.12109375, - 0.45703125, - 0.021728516, - 0.38671875, - -0.3671875, - -0.52734375, - -0.115722656, - 0.125, - 0.5703125, - -1.234375, - 0.06298828, - -0.55859375, - 0.60546875, - 0.8125, - -0.0032958984, - -0.068359375, - -0.21191406, - 0.56640625, - 0.17285156, - -0.3515625, - 0.36328125, - -0.99609375, - 0.43554688, - -0.1015625, - 0.07080078, - -0.66796875, - 1.359375, - 0.41601562, - 0.15917969, - 0.17773438, - -0.28710938, - 0.021850586, - -0.46289062, - 0.17578125, - -0.03955078, - -0.026855469, - 0.5078125, - -0.65625, - 0.0012512207, - 0.044433594, - -0.18652344, - 0.4921875, - -0.75390625, - 0.0072021484, - 0.4375, - -0.31445312, - 0.20214844, - 0.15039062, - -0.63671875, - -0.296875, - -0.375, - -0.027709961, - 0.013427734, - 0.17089844, - 0.89453125, - 0.11621094, - -0.43945312, - -0.30859375, - 0.02709961, - 0.23242188, - -0.64453125, - -0.859375, - 0.22167969, - -0.023071289, - -0.052734375, - 0.3671875, - -0.18359375, - 0.81640625, - -0.11816406, - 0.028320312, - 0.19042969, - 0.012817383, - -0.43164062, - 0.55859375, - -0.27929688, - 0.14257812, - -0.140625, - -0.048583984, - -0.014526367, - 0.35742188, - 0.22753906, - 0.13183594, - 0.04638672, - 0.03930664, - -0.29296875, - -0.2109375, - -0.16308594, - -0.48046875, - 
-0.13378906, - -0.39257812, - 0.29296875, - -0.047851562, - -0.5546875, - 0.08300781, - -0.14941406, - -0.07080078, - 0.12451172, - 0.1953125, - -0.51171875, - -0.048095703, - 0.1953125, - -0.37695312, - 0.46875, - -0.084472656, - 0.19042969, - -0.39453125, - 0.69921875, - -0.0065307617, - 0.25390625, - -0.16992188, - -0.5078125, - 0.016845703, - 0.27929688, - -0.22070312, - 0.671875, - 0.18652344, - 0.25, - -0.046875, - -0.012023926, - -0.36523438, - 0.36523438, - -0.11279297, - 0.421875, - 0.079589844, - -0.100097656, - 0.37304688, - 0.29882812, - -0.10546875, - -0.36523438, - 0.040039062, - 0.546875, - 0.12890625, - -0.06542969, - -0.38085938, - -0.35742188, - -0.6484375, - -0.28515625, - 0.0107421875, - -0.055664062, - 0.45703125, - 0.33984375, - 0.26367188, - -0.23144531, - 0.012878418, - -0.875, - 0.11035156, - 0.33984375, - 0.203125, - 0.38867188, - 0.24902344, - -0.37304688, - -0.98046875, - -0.122558594, - -0.17871094, - -0.09277344, - 0.1796875, - 0.4453125, - -0.66796875, - 0.78515625, - 0.12988281, - 0.35546875, - 0.44140625, - 0.58984375, - 0.29492188, - 0.7734375, - -0.21972656, - -0.40234375, - -0.22265625, - 0.18359375, - 0.54296875, - 0.17382812, - 0.59375, - -0.390625, - -0.92578125, - -0.017456055, - -0.25, - 0.73828125, - 0.7578125, - -0.3828125, - -0.25976562, - 0.049072266, - 0.046875, - -0.3515625, - 0.30078125, - -1.03125, - -0.48828125, - 0.0017929077, - -0.26171875, - 0.20214844, - 0.29882812, - 0.064941406, - 0.21484375, - -0.55078125, - -0.021362305, - 0.12988281, - 0.27148438, - 0.38867188, - -0.19726562, - -0.55078125, - 0.1640625, - 0.32226562, - -0.72265625, - 0.36132812, - 1.21875, - -0.22070312, - -0.32421875, - -0.29882812, - 0.0024414062, - 0.19921875, - 0.734375, - 0.16210938, - 0.17871094, - -0.19140625, - 0.38476562, - -0.06591797, - -0.47070312, - -0.040039062, - -0.33007812, - -0.07910156, - -0.2890625, - 0.00970459, - 0.12695312, - -0.12060547, - -0.18847656, - 1.015625, - -0.032958984, - 0.12451172, - -0.38476562, - 
0.063964844, - 1.0859375, - 0.067871094, - -0.24511719, - 0.125, - 0.10546875, - -0.22460938, - -0.29101562, - 0.24414062, - -0.017944336, - -0.15625, - -0.60546875, - -0.25195312, - -0.46875, - 0.80859375, - -0.34960938, - 0.42382812, - 0.796875, - 0.296875, - -0.067871094, - 0.39453125, - 0.07470703, - 0.033935547, - 0.24414062, - 0.32617188, - 0.023925781, - 0.73046875, - 0.2109375, - -0.43164062, - 0.14453125, - 0.63671875, - 0.21972656, - -0.1875, - -0.18066406, - -0.22167969, - -1.3359375, - 0.52734375, - -0.40625, - -0.12988281, - 0.17480469, - -0.18066406, - 0.58984375, - -0.32421875, - -0.13476562, - 0.39257812, - -0.19238281, - 0.068359375, - 0.7265625, - -0.7109375, - -0.125, - 0.328125, - 0.34179688, - -0.48828125, - -0.10058594, - -0.83984375, - 0.30273438, - 0.008239746, - -1.390625, - 0.171875, - 0.34960938, - 0.44921875, - 0.22167969, - 0.60546875, - -0.36914062, - -0.028808594, - -0.19921875, - 0.6875, - 0.52734375, - -0.07421875, - 0.35546875, - 0.546875, - 0.08691406, - 0.23339844, - -0.984375, - -0.20507812, - 0.08544922, - 0.453125, - -0.07421875, - -0.953125, - 0.74609375, - -0.796875, - 0.47851562, - 0.81640625, - -0.44921875, - -0.33398438, - -0.54296875, - 0.46484375, - -0.390625, - -0.24121094, - -0.0115356445, - 1.1328125, - 1.0390625, - 0.6484375, - 0.35742188, - -0.29492188, - -0.0007095337, - -0.060302734, - 0.21777344, - 0.15136719, - -0.6171875, - 0.11328125, - -0.025878906, - 0.19238281, - 0.140625, - 0.171875, - 0.25195312, - 0.10546875, - 0.0008354187, - -0.13476562, - -0.26953125, - 0.025024414, - -0.28320312, - -0.107910156, - 1.015625, - 0.05493164, - -0.12988281, - 0.30859375, - 0.22558594, - -0.60546875, - 0.11328125, - -1.203125, - 0.6484375, - 0.087402344, - 0.32226562, - 0.63671875, - -0.07714844, - -1.390625, - -0.71875, - -0.34179688, - -0.10546875, - -0.37304688, - -0.09863281, - -0.41210938, - -0.14941406, - 0.41210938, - -0.20898438, - 0.18261719, - 0.67578125, - 0.41601562, - 0.32617188, - 0.2421875, - -0.14257812, - 
-0.6796875, - 0.01953125, - 0.34179688, - 0.20800781, - -0.123046875, - 0.087402344, - 0.85546875, - 0.33984375, - 0.33203125, - -0.68359375, - 0.44921875, - 0.50390625, - 0.083496094, - 0.10888672, - -0.09863281, - 0.55078125, - 0.09765625, - -0.50390625, - 0.13378906, - -0.29882812, - 0.030761719, - -0.64453125, - 0.22949219, - 0.43945312, - 0.16503906, - 0.10888672, - -0.12792969, - -0.039794922, - -0.111328125, - -0.35742188, - 0.053222656, - -0.78125, - -0.4375, - 0.359375, - -0.88671875, - -0.21972656, - -0.053710938, - 0.91796875, - -0.10644531, - 0.55859375, - -0.7734375, - 0.5078125, - 0.46484375, - 0.32226562, - 0.16796875, - -0.28515625, - 0.045410156, - -0.45117188, - 0.38867188, - -0.33398438, - -0.5234375, - 0.296875, - 0.6015625, - 0.3515625, - -0.734375, - 0.3984375, - -0.08251953, - 0.359375, - -0.28515625, - -0.88671875, - 0.0051879883, - 0.045166016, - -0.7421875, - -0.36523438, - 0.140625, - 0.18066406, - -0.171875, - -0.15625, - -0.53515625, - 0.2421875, - -0.19140625, - -0.18066406, - 0.25390625, - 0.6875, - -0.01965332, - -0.33203125, - 0.29492188, - 0.107421875, - -0.048339844, - -0.82421875, - 0.52734375, - 0.78125, - 0.8203125, - -0.90625, - 0.765625, - 0.0390625, - 0.045410156, - 0.26367188, - -0.14355469, - -0.26367188, - 0.390625, - -0.10888672, - 0.33007812, - -0.5625, - 0.08105469, - -0.13769531, - 0.8515625, - -0.14453125, - 0.77734375, - -0.48046875, - -0.3515625, - -0.25390625, - -0.09277344, - 0.23925781, - -0.022338867, - -0.45898438, - 0.36132812, - -0.23828125, - 0.265625, - -0.48632812, - -0.46875, - -0.75390625, - 1.3125, - 0.78125, - -0.63671875, - -1.21875, - 0.5078125, - -0.27734375, - -0.118652344, - 0.041992188, - -0.14648438, - -0.8046875, - 0.21679688, - -0.79296875, - 0.28320312, - -0.09667969, - 0.42773438, - 0.49414062, - 0.44726562, - 0.21972656, - -0.02746582, - -0.03540039, - -0.14941406, - -0.515625, - -0.27929688, - 0.9609375, - -0.007598877, - 0.34765625, - -0.060546875, - -0.44726562, - 0.7421875, - 
0.15332031, - 0.45117188, - -0.4921875, - 0.07080078, - 0.5625, - 0.3984375, - -0.20019531, - 0.014892578, - 0.63671875, - -0.0071411133, - 0.016357422, - 1.0625, - 0.049316406, - 0.18066406, - 0.09814453, - -0.52734375, - -0.359375, - -0.072265625, - -0.41992188, - 0.39648438, - 0.38671875, - -0.30273438, - -0.056640625, - -0.640625, - -0.44921875, - 0.49414062, - 0.29101562, - 0.49609375, - 0.40429688, - -0.10205078, - 0.49414062, - -0.28125, - -0.12695312, - -0.0022735596, - -0.37304688, - 0.122558594, - 0.07519531, - -0.12597656, - -0.38085938, - -0.19824219, - -0.40039062, - 0.56640625, - -1.140625, - -0.515625, - -0.17578125, - -0.765625, - -0.43945312, - 0.3359375, - -0.24707031, - 0.32617188, - -0.45117188, - -0.37109375, - 0.45117188, - -0.27539062, - -0.38867188, - 0.09082031, - 0.17675781, - 0.49414062, - 0.19921875, - 0.17480469, - 0.8515625, - -0.23046875, - -0.234375, - -0.28515625, - 0.10253906, - 0.29101562, - -0.3359375, - -0.203125, - 0.6484375, - 0.11767578, - -0.20214844, - -0.42382812, - 0.26367188, - 0.6328125, - 0.0059509277, - 0.08691406, - -1.5625, - -0.43554688, - 0.17675781, - 0.091796875, - -0.5234375, - -0.09863281, - 0.20605469, - 0.16601562, - -0.578125, - 0.017700195, - 0.41015625, - 1.03125, - -0.55078125, - 0.21289062, - -0.35351562, - 0.24316406, - -0.123535156, - 0.11035156, - -0.48242188, - -0.34179688, - 0.45117188, - 0.3125, - -0.071777344, - 0.12792969, - 0.55859375, - 0.063964844, - -0.21191406, - 0.01965332, - -1.359375, - -0.21582031, - -0.019042969, - 0.16308594, - -0.3671875, - -0.40625, - -1.0234375, - -0.21289062, - 0.24023438, - -0.28125, - 0.26953125, - -0.14550781, - -0.087890625, - 0.16113281, - -0.49804688, - -0.17675781, - -0.890625, - 0.27929688, - 0.484375, - 0.27148438, - 0.11816406, - 0.83984375, - 0.029052734, - -0.890625, - 0.66796875, - 0.78515625, - -0.953125, - 0.49414062, - -0.546875, - 0.106933594, - -0.08251953, - 0.2890625, - -0.1484375, - -0.85546875, - 0.32421875, - -0.0040893555, - -0.16601562, - 
-0.16699219, - 0.24414062, - -0.5078125, - 0.25390625, - -0.10253906, - 0.15625, - 0.140625, - -0.27539062, - -0.546875, - -0.5546875, - -0.71875, - 0.37304688, - 0.060058594, - -0.076171875, - 0.44921875, - 0.06933594, - -0.28710938, - -0.22949219, - 0.17578125, - 0.09814453, - 0.4765625, - -0.95703125, - -0.03540039, - 0.21289062, - -0.7578125, - -0.07373047, - 0.10546875, - 0.07128906, - 0.76171875, - 0.4296875, - -0.09375, - 0.27539062, - -0.55078125, - 0.29882812, - -0.42382812, - 0.32617188, - -0.39648438, - 0.12451172, - 0.16503906, - -0.22460938, - -0.65625, - -0.022094727, - 0.61328125, - -0.024780273, - 0.62109375, - -0.033447266, - 0.515625, - 0.12890625, - -0.21875, - -0.08642578, - 0.49804688, - -0.2265625, - -0.29296875, - 0.19238281, - 0.3515625, - -1.265625, - 0.57421875, - 0.20117188, - -0.28320312, - 0.1953125, - -0.30664062, - 0.2265625, - -0.11230469, - 0.83984375, - 0.111328125, - 0.265625, - 0.71484375, - -0.625, - 0.38867188, - 0.47070312, - -0.32617188, - -0.171875, - 1.0078125, - 0.19726562, - -0.118652344, - 0.63671875, - -0.068359375, - -0.25585938, - 0.4140625, - -0.29296875, - 0.21386719, - -0.064453125, - 0.15820312, - -0.89453125, - -0.16308594, - 0.48046875, - 0.14648438, - -0.5703125, - 0.84765625, - -0.19042969, - 0.03515625, - 0.42578125, - -0.27539062, - -0.5390625, - 0.95703125, - 0.2734375, - 0.16699219, - -0.328125, - 0.11279297, - 0.003250122, - 0.47265625, - -0.31640625, - 0.546875, - 0.55859375, - 0.06933594, - -0.61328125, - -0.16210938, - -0.375, - 0.100097656, - -0.088378906, - 0.12695312, - 0.079589844, - 0.123535156, - -1.0078125, - 0.6875, - 0.022949219, - -0.40039062, - -0.09863281, - 0.29101562, - -1.2890625, - -0.20996094, - 0.36328125, - -0.3515625, - 0.7890625, - 0.12207031, - 0.48046875, - -0.13671875, - -0.041015625, - 0.19824219, - 0.19921875, - 0.01171875, - -0.37695312, - -0.62890625, - 0.9375, - -0.671875, - 0.24609375, - 0.6484375, - -0.29101562, - 0.076171875, - 0.62109375, - -0.5546875, - 0.36523438, - 
0.75390625, - -0.19140625, - -0.875, - -0.8203125, - -0.24414062, - -0.625, - 0.1796875, - -0.40039062, - 0.25390625, - -0.14550781, - -0.21679688, - -0.828125, - 0.3359375, - 0.43554688, - 0.55078125, - -0.44921875, - -0.28710938, - 0.24023438, - 0.18066406, - -0.6953125, - 0.020385742, - -0.11376953, - 0.13867188, - -0.92578125, - 0.33398438, - -0.328125, - 0.78125, - -0.45507812, - -0.07470703, - 0.34179688, - 0.07080078, - 0.76171875, - 0.37890625, - -0.10644531, - 0.90234375, - -0.21875, - -0.15917969, - -0.36132812, - 0.2109375, - -0.45703125, - -0.76953125, - 0.21289062, - 0.26367188, - 0.49804688, - 0.35742188, - -0.20019531, - 0.31054688, - 0.34179688, - 0.17089844, - -0.15429688, - 0.39648438, - -0.5859375, - 0.20996094, - -0.40039062, - 0.5703125, - -0.515625, - 0.5234375, - 0.049560547, - 0.328125, - 0.24804688, - 0.42578125, - 0.609375, - 0.19238281, - 0.27929688, - 0.19335938, - 0.78125, - -0.9921875, - 0.23925781, - -1.3828125, - -0.22949219, - -0.578125, - -0.13964844, - -0.17382812, - -0.011169434, - 0.26171875, - -0.73046875, - -1.4375, - 0.6953125, - -0.7421875, - 0.052246094, - 0.12207031, - 1.3046875, - 0.38867188, - 0.040283203, - -0.546875, - -0.0021514893, - 0.18457031, - -0.5546875, - -0.51171875, - -0.16308594, - -0.104003906, - -0.38867188, - -0.20996094, - -0.8984375, - 0.6015625, - -0.30078125, - -0.13769531, - 0.16113281, - 0.58203125, - -0.23730469, - -0.125, - -1.0234375, - 0.875, - -0.7109375, - 0.29101562, - 0.09667969, - -0.3203125, - -0.48046875, - 0.37890625, - 0.734375, - -0.28710938, - -0.29882812, - -0.05493164, - 0.34765625, - -0.84375, - 0.65625, - 0.578125, - -0.20019531, - 0.13769531, - 0.10058594, - -0.37109375, - 0.36523438, - -0.22167969, - 0.72265625, - ], - "inputTextTokenCount": 6, - }, - ], - "amazon.titan-embed-text-v1::This is an embedding test.": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "11233989-07e8-4ecb-9ba6-79601ba6d8cc"}, - 200, - { - "embedding": [ - -0.14160156, - 0.034423828, - 
0.54296875, - 0.10986328, - 0.053466797, - 0.3515625, - 0.12988281, - -0.0002708435, - -0.21484375, - 0.060302734, - 0.58984375, - -0.5859375, - 0.52734375, - 0.82421875, - -0.91015625, - -0.19628906, - 0.45703125, - 0.609375, - -0.67578125, - 0.39453125, - -0.46875, - -0.25390625, - -0.21191406, - 0.114746094, - 0.31640625, - -0.41015625, - -0.32617188, - -0.43554688, - 0.4765625, - -0.4921875, - 0.40429688, - 0.06542969, - 0.859375, - -0.008056641, - -0.19921875, - 0.072753906, - 0.33203125, - 0.69921875, - 0.39453125, - 0.15527344, - 0.08886719, - -0.25, - 0.859375, - 0.22949219, - -0.19042969, - 0.13769531, - -0.078125, - 0.41210938, - 0.875, - 0.5234375, - 0.59765625, - -0.22949219, - -0.22558594, - -0.47460938, - 0.37695312, - 0.51953125, - -0.5703125, - 0.46679688, - 0.43554688, - 0.17480469, - -0.080566406, - -0.16699219, - -0.734375, - -1.0625, - -0.33984375, - 0.390625, - -0.18847656, - -0.5234375, - -0.48828125, - 0.44921875, - -0.09814453, - -0.3359375, - 0.087402344, - 0.36914062, - 1.3203125, - 0.25585938, - 0.14746094, - -0.059570312, - -0.15820312, - -0.037353516, - -0.61328125, - -0.6484375, - -0.35351562, - 0.55078125, - -0.26953125, - 0.90234375, - 0.3671875, - 0.31054688, - 0.00014019012, - -0.171875, - 0.025512695, - 0.5078125, - 0.11621094, - 0.33203125, - 0.8125, - -0.3046875, - -1.078125, - -0.5703125, - 0.26171875, - -0.4609375, - 0.203125, - 0.44726562, - -0.5078125, - 0.41601562, - -0.1953125, - 0.028930664, - -0.57421875, - 0.2265625, - 0.13574219, - -0.040039062, - -0.22949219, - -0.515625, - -0.19042969, - -0.30078125, - 0.10058594, - -0.66796875, - 0.6015625, - 0.296875, - -0.765625, - -0.87109375, - 0.2265625, - 0.068847656, - -0.088378906, - -0.1328125, - -0.796875, - -0.37304688, - 0.47460938, - -0.3515625, - -0.8125, - -0.32226562, - 0.265625, - 0.3203125, - -0.4140625, - -0.49023438, - 0.859375, - -0.19140625, - -0.6328125, - 0.10546875, - -0.5625, - 0.66015625, - 0.26171875, - -0.2109375, - 0.421875, - -0.82421875, - 0.29296875, 
- 0.17773438, - 0.24023438, - 0.5078125, - -0.49804688, - -0.10205078, - 0.10498047, - -0.36132812, - -0.47460938, - -0.20996094, - 0.010070801, - -0.546875, - 0.66796875, - -0.123046875, - -0.75390625, - 0.19628906, - 0.17480469, - 0.18261719, - -0.96875, - -0.26171875, - 0.4921875, - -0.40039062, - 0.296875, - 0.1640625, - -0.20507812, - -0.36132812, - 0.76171875, - -1.234375, - -0.625, - 0.060058594, - -0.09375, - -0.14746094, - 1.09375, - 0.057861328, - 0.22460938, - -0.703125, - 0.07470703, - 0.23828125, - -0.083984375, - -0.54296875, - 0.5546875, - -0.5, - -0.390625, - 0.106933594, - 0.6640625, - 0.27734375, - -0.953125, - 0.35351562, - -0.7734375, - -0.77734375, - 0.16503906, - -0.42382812, - 0.36914062, - 0.020141602, - -1.3515625, - 0.18847656, - 0.13476562, - -0.034179688, - -0.03930664, - -0.03857422, - -0.027954102, - 0.73828125, - -0.18945312, - -0.09814453, - -0.46289062, - 0.36914062, - 0.033203125, - 0.020874023, - -0.703125, - 0.91796875, - 0.38671875, - 0.625, - -0.19335938, - -0.16796875, - -0.58203125, - 0.21386719, - -0.032470703, - -0.296875, - -0.15625, - -0.1640625, - -0.74609375, - 0.328125, - 0.5546875, - -0.1953125, - 1.0546875, - 0.171875, - -0.099609375, - 0.5234375, - 0.05078125, - -0.35742188, - -0.2734375, - -1.3203125, - -0.8515625, - -0.16015625, - 0.01574707, - 0.29296875, - 0.18457031, - -0.265625, - 0.048339844, - 0.045654297, - -0.32226562, - 0.087890625, - -0.0047302246, - 0.38671875, - 0.10644531, - -0.06225586, - 1.03125, - 0.94140625, - -0.3203125, - 0.20800781, - -1.171875, - 0.48046875, - -0.091796875, - 0.20800781, - -0.1328125, - -0.20507812, - 0.28125, - -0.47070312, - -0.09033203, - 0.0013809204, - -0.08203125, - 0.43359375, - -0.03100586, - -0.060791016, - -0.53515625, - -1.46875, - 0.000101566315, - 0.515625, - 0.40625, - -0.10498047, - -0.15820312, - -0.009460449, - -0.77734375, - -0.5859375, - 0.9765625, - 0.099609375, - 0.51953125, - 0.38085938, - -0.09667969, - -0.100097656, - -0.5, - -1.3125, - -0.18066406, - 
-0.099121094, - 0.26171875, - -0.14453125, - -0.546875, - 0.17578125, - 0.484375, - 0.765625, - 0.45703125, - 0.2734375, - 0.0028076172, - 0.17089844, - -0.32421875, - -0.37695312, - 0.30664062, - -0.48046875, - 0.07128906, - 0.031982422, - -0.31054688, - -0.055419922, - -0.29296875, - 0.3359375, - -0.296875, - 0.47851562, - -0.05126953, - 0.18457031, - -0.01953125, - -0.35742188, - 0.017944336, - -0.25, - 0.10595703, - 0.17382812, - -0.73828125, - 0.36914062, - -0.15234375, - -0.8125, - 0.17382812, - 0.048095703, - 0.5625, - -0.33789062, - 0.023071289, - -0.21972656, - 0.16015625, - 0.032958984, - -1.1171875, - -0.984375, - 0.83984375, - 0.009033203, - -0.042236328, - -0.46484375, - -0.08203125, - 0.44726562, - -0.765625, - -0.3984375, - -0.40820312, - -0.234375, - 0.044189453, - 0.119628906, - -0.7578125, - -0.55078125, - -0.4453125, - 0.7578125, - 0.34960938, - 0.96484375, - 0.35742188, - 0.36914062, - -0.35351562, - -0.36132812, - 1.109375, - 0.5859375, - 0.85546875, - -0.10644531, - -0.6953125, - -0.0066833496, - 0.042236328, - -0.06689453, - 0.36914062, - 0.9765625, - -0.3046875, - 0.59765625, - -0.6640625, - 0.21484375, - -0.07128906, - 1.1328125, - -0.51953125, - 0.86328125, - -0.11328125, - 0.15722656, - -0.36328125, - -0.04638672, - 1.4375, - 0.18457031, - -0.18359375, - 0.10595703, - -0.49023438, - -0.07324219, - -0.73046875, - -0.119140625, - 0.021118164, - 0.4921875, - -0.46875, - 0.28710938, - 0.3359375, - 0.11767578, - -0.2109375, - -0.14550781, - 0.39648438, - -0.27734375, - 0.48046875, - 0.12988281, - 0.45507812, - -0.375, - -0.84765625, - 0.25585938, - -0.36523438, - 0.8046875, - 0.42382812, - -0.24511719, - 0.54296875, - 0.71875, - 0.010009766, - -0.04296875, - 0.083984375, - -0.52734375, - 0.13964844, - -0.27539062, - -0.30273438, - 1.1484375, - -0.515625, - -0.19335938, - 0.58984375, - 0.049072266, - 0.703125, - -0.04272461, - 0.5078125, - 0.34960938, - -0.3359375, - -0.47460938, - 0.049316406, - 0.36523438, - 0.7578125, - -0.022827148, - 
-0.71484375, - 0.21972656, - 0.09716797, - -0.203125, - -0.36914062, - 1.34375, - 0.34179688, - 0.46679688, - 1.078125, - 0.26171875, - 0.41992188, - 0.22363281, - -0.515625, - -0.5703125, - 0.13378906, - 0.26757812, - -0.22558594, - -0.5234375, - 0.06689453, - 0.08251953, - -0.625, - 0.16796875, - 0.43164062, - -0.55859375, - 0.28125, - 0.078125, - 0.6328125, - 0.23242188, - -0.064941406, - -0.004486084, - -0.20703125, - 0.2734375, - 0.453125, - -0.734375, - 0.04272461, - 0.36132812, - -0.19628906, - -0.12402344, - 1.3515625, - 0.25585938, - 0.4921875, - -0.29296875, - -0.58984375, - 0.021240234, - -0.044677734, - 0.7578125, - -0.7890625, - 0.10253906, - -0.15820312, - -0.5078125, - -0.39453125, - -0.453125, - 0.35742188, - 0.921875, - 0.44335938, - -0.49804688, - 0.44335938, - 0.31445312, - 0.58984375, - -1.0078125, - -0.22460938, - 0.24121094, - 0.87890625, - 0.66015625, - -0.390625, - -0.05053711, - 0.059570312, - 0.36132812, - -0.00038719177, - -0.017089844, - 0.62890625, - 0.203125, - 0.17480469, - 0.025512695, - 0.47460938, - 0.3125, - 1.140625, - 0.32421875, - -0.057861328, - 0.36914062, - -0.7265625, - -0.51953125, - 0.26953125, - 0.42773438, - 0.064453125, - 0.6328125, - 0.27148438, - -0.11767578, - 0.66796875, - -0.38671875, - 0.5234375, - -0.59375, - 0.5078125, - 0.008239746, - -0.34179688, - -0.27539062, - 0.5234375, - 1.296875, - 0.29492188, - -0.010986328, - -0.41210938, - 0.59375, - 0.061767578, - -0.33398438, - -2.03125, - 0.87890625, - -0.010620117, - 0.53125, - 0.14257812, - -0.515625, - -1.03125, - 0.578125, - 0.1875, - 0.44335938, - -0.33203125, - -0.36328125, - -0.3203125, - 0.29296875, - -0.8203125, - 0.41015625, - -0.48242188, - 0.66015625, - 0.5625, - -0.16503906, - -0.54296875, - -0.38085938, - 0.26171875, - 0.62109375, - 0.29101562, - -0.31054688, - 0.23730469, - -0.8515625, - 0.5234375, - 0.15332031, - 0.52734375, - -0.079589844, - -0.080566406, - -0.15527344, - -0.022827148, - 0.030517578, - -0.1640625, - -0.421875, - 0.09716797, - 
0.03930664, - -0.055908203, - -0.546875, - -0.47851562, - 0.091796875, - 0.32226562, - -0.94140625, - -0.04638672, - -1.203125, - -0.39648438, - 0.45507812, - 0.296875, - -0.45703125, - 0.37890625, - -0.122558594, - 0.28320312, - -0.01965332, - -0.11669922, - -0.34570312, - -0.53515625, - -0.091308594, - -0.9375, - -0.32617188, - 0.095214844, - -0.4765625, - 0.37890625, - -0.859375, - 1.1015625, - -0.08935547, - 0.46484375, - -0.19238281, - 0.7109375, - 0.040039062, - -0.5390625, - 0.22363281, - -0.70703125, - 0.4921875, - -0.119140625, - -0.26757812, - -0.08496094, - 0.0859375, - -0.00390625, - -0.013366699, - -0.03955078, - 0.07421875, - -0.13085938, - 0.29101562, - -0.12109375, - 0.45703125, - 0.021728516, - 0.38671875, - -0.3671875, - -0.52734375, - -0.115722656, - 0.125, - 0.5703125, - -1.234375, - 0.06298828, - -0.55859375, - 0.60546875, - 0.8125, - -0.0032958984, - -0.068359375, - -0.21191406, - 0.56640625, - 0.17285156, - -0.3515625, - 0.36328125, - -0.99609375, - 0.43554688, - -0.1015625, - 0.07080078, - -0.66796875, - 1.359375, - 0.41601562, - 0.15917969, - 0.17773438, - -0.28710938, - 0.021850586, - -0.46289062, - 0.17578125, - -0.03955078, - -0.026855469, - 0.5078125, - -0.65625, - 0.0012512207, - 0.044433594, - -0.18652344, - 0.4921875, - -0.75390625, - 0.0072021484, - 0.4375, - -0.31445312, - 0.20214844, - 0.15039062, - -0.63671875, - -0.296875, - -0.375, - -0.027709961, - 0.013427734, - 0.17089844, - 0.89453125, - 0.11621094, - -0.43945312, - -0.30859375, - 0.02709961, - 0.23242188, - -0.64453125, - -0.859375, - 0.22167969, - -0.023071289, - -0.052734375, - 0.3671875, - -0.18359375, - 0.81640625, - -0.11816406, - 0.028320312, - 0.19042969, - 0.012817383, - -0.43164062, - 0.55859375, - -0.27929688, - 0.14257812, - -0.140625, - -0.048583984, - -0.014526367, - 0.35742188, - 0.22753906, - 0.13183594, - 0.04638672, - 0.03930664, - -0.29296875, - -0.2109375, - -0.16308594, - -0.48046875, - -0.13378906, - -0.39257812, - 0.29296875, - -0.047851562, - 
-0.5546875, - 0.08300781, - -0.14941406, - -0.07080078, - 0.12451172, - 0.1953125, - -0.51171875, - -0.048095703, - 0.1953125, - -0.37695312, - 0.46875, - -0.084472656, - 0.19042969, - -0.39453125, - 0.69921875, - -0.0065307617, - 0.25390625, - -0.16992188, - -0.5078125, - 0.016845703, - 0.27929688, - -0.22070312, - 0.671875, - 0.18652344, - 0.25, - -0.046875, - -0.012023926, - -0.36523438, - 0.36523438, - -0.11279297, - 0.421875, - 0.079589844, - -0.100097656, - 0.37304688, - 0.29882812, - -0.10546875, - -0.36523438, - 0.040039062, - 0.546875, - 0.12890625, - -0.06542969, - -0.38085938, - -0.35742188, - -0.6484375, - -0.28515625, - 0.0107421875, - -0.055664062, - 0.45703125, - 0.33984375, - 0.26367188, - -0.23144531, - 0.012878418, - -0.875, - 0.11035156, - 0.33984375, - 0.203125, - 0.38867188, - 0.24902344, - -0.37304688, - -0.98046875, - -0.122558594, - -0.17871094, - -0.09277344, - 0.1796875, - 0.4453125, - -0.66796875, - 0.78515625, - 0.12988281, - 0.35546875, - 0.44140625, - 0.58984375, - 0.29492188, - 0.7734375, - -0.21972656, - -0.40234375, - -0.22265625, - 0.18359375, - 0.54296875, - 0.17382812, - 0.59375, - -0.390625, - -0.92578125, - -0.017456055, - -0.25, - 0.73828125, - 0.7578125, - -0.3828125, - -0.25976562, - 0.049072266, - 0.046875, - -0.3515625, - 0.30078125, - -1.03125, - -0.48828125, - 0.0017929077, - -0.26171875, - 0.20214844, - 0.29882812, - 0.064941406, - 0.21484375, - -0.55078125, - -0.021362305, - 0.12988281, - 0.27148438, - 0.38867188, - -0.19726562, - -0.55078125, - 0.1640625, - 0.32226562, - -0.72265625, - 0.36132812, - 1.21875, - -0.22070312, - -0.32421875, - -0.29882812, - 0.0024414062, - 0.19921875, - 0.734375, - 0.16210938, - 0.17871094, - -0.19140625, - 0.38476562, - -0.06591797, - -0.47070312, - -0.040039062, - -0.33007812, - -0.07910156, - -0.2890625, - 0.00970459, - 0.12695312, - -0.12060547, - -0.18847656, - 1.015625, - -0.032958984, - 0.12451172, - -0.38476562, - 0.063964844, - 1.0859375, - 0.067871094, - -0.24511719, - 0.125, - 
0.10546875, - -0.22460938, - -0.29101562, - 0.24414062, - -0.017944336, - -0.15625, - -0.60546875, - -0.25195312, - -0.46875, - 0.80859375, - -0.34960938, - 0.42382812, - 0.796875, - 0.296875, - -0.067871094, - 0.39453125, - 0.07470703, - 0.033935547, - 0.24414062, - 0.32617188, - 0.023925781, - 0.73046875, - 0.2109375, - -0.43164062, - 0.14453125, - 0.63671875, - 0.21972656, - -0.1875, - -0.18066406, - -0.22167969, - -1.3359375, - 0.52734375, - -0.40625, - -0.12988281, - 0.17480469, - -0.18066406, - 0.58984375, - -0.32421875, - -0.13476562, - 0.39257812, - -0.19238281, - 0.068359375, - 0.7265625, - -0.7109375, - -0.125, - 0.328125, - 0.34179688, - -0.48828125, - -0.10058594, - -0.83984375, - 0.30273438, - 0.008239746, - -1.390625, - 0.171875, - 0.34960938, - 0.44921875, - 0.22167969, - 0.60546875, - -0.36914062, - -0.028808594, - -0.19921875, - 0.6875, - 0.52734375, - -0.07421875, - 0.35546875, - 0.546875, - 0.08691406, - 0.23339844, - -0.984375, - -0.20507812, - 0.08544922, - 0.453125, - -0.07421875, - -0.953125, - 0.74609375, - -0.796875, - 0.47851562, - 0.81640625, - -0.44921875, - -0.33398438, - -0.54296875, - 0.46484375, - -0.390625, - -0.24121094, - -0.0115356445, - 1.1328125, - 1.0390625, - 0.6484375, - 0.35742188, - -0.29492188, - -0.0007095337, - -0.060302734, - 0.21777344, - 0.15136719, - -0.6171875, - 0.11328125, - -0.025878906, - 0.19238281, - 0.140625, - 0.171875, - 0.25195312, - 0.10546875, - 0.0008354187, - -0.13476562, - -0.26953125, - 0.025024414, - -0.28320312, - -0.107910156, - 1.015625, - 0.05493164, - -0.12988281, - 0.30859375, - 0.22558594, - -0.60546875, - 0.11328125, - -1.203125, - 0.6484375, - 0.087402344, - 0.32226562, - 0.63671875, - -0.07714844, - -1.390625, - -0.71875, - -0.34179688, - -0.10546875, - -0.37304688, - -0.09863281, - -0.41210938, - -0.14941406, - 0.41210938, - -0.20898438, - 0.18261719, - 0.67578125, - 0.41601562, - 0.32617188, - 0.2421875, - -0.14257812, - -0.6796875, - 0.01953125, - 0.34179688, - 0.20800781, - 
-0.123046875, - 0.087402344, - 0.85546875, - 0.33984375, - 0.33203125, - -0.68359375, - 0.44921875, - 0.50390625, - 0.083496094, - 0.10888672, - -0.09863281, - 0.55078125, - 0.09765625, - -0.50390625, - 0.13378906, - -0.29882812, - 0.030761719, - -0.64453125, - 0.22949219, - 0.43945312, - 0.16503906, - 0.10888672, - -0.12792969, - -0.039794922, - -0.111328125, - -0.35742188, - 0.053222656, - -0.78125, - -0.4375, - 0.359375, - -0.88671875, - -0.21972656, - -0.053710938, - 0.91796875, - -0.10644531, - 0.55859375, - -0.7734375, - 0.5078125, - 0.46484375, - 0.32226562, - 0.16796875, - -0.28515625, - 0.045410156, - -0.45117188, - 0.38867188, - -0.33398438, - -0.5234375, - 0.296875, - 0.6015625, - 0.3515625, - -0.734375, - 0.3984375, - -0.08251953, - 0.359375, - -0.28515625, - -0.88671875, - 0.0051879883, - 0.045166016, - -0.7421875, - -0.36523438, - 0.140625, - 0.18066406, - -0.171875, - -0.15625, - -0.53515625, - 0.2421875, - -0.19140625, - -0.18066406, - 0.25390625, - 0.6875, - -0.01965332, - -0.33203125, - 0.29492188, - 0.107421875, - -0.048339844, - -0.82421875, - 0.52734375, - 0.78125, - 0.8203125, - -0.90625, - 0.765625, - 0.0390625, - 0.045410156, - 0.26367188, - -0.14355469, - -0.26367188, - 0.390625, - -0.10888672, - 0.33007812, - -0.5625, - 0.08105469, - -0.13769531, - 0.8515625, - -0.14453125, - 0.77734375, - -0.48046875, - -0.3515625, - -0.25390625, - -0.09277344, - 0.23925781, - -0.022338867, - -0.45898438, - 0.36132812, - -0.23828125, - 0.265625, - -0.48632812, - -0.46875, - -0.75390625, - 1.3125, - 0.78125, - -0.63671875, - -1.21875, - 0.5078125, - -0.27734375, - -0.118652344, - 0.041992188, - -0.14648438, - -0.8046875, - 0.21679688, - -0.79296875, - 0.28320312, - -0.09667969, - 0.42773438, - 0.49414062, - 0.44726562, - 0.21972656, - -0.02746582, - -0.03540039, - -0.14941406, - -0.515625, - -0.27929688, - 0.9609375, - -0.007598877, - 0.34765625, - -0.060546875, - -0.44726562, - 0.7421875, - 0.15332031, - 0.45117188, - -0.4921875, - 0.07080078, - 0.5625, - 
0.3984375, - -0.20019531, - 0.014892578, - 0.63671875, - -0.0071411133, - 0.016357422, - 1.0625, - 0.049316406, - 0.18066406, - 0.09814453, - -0.52734375, - -0.359375, - -0.072265625, - -0.41992188, - 0.39648438, - 0.38671875, - -0.30273438, - -0.056640625, - -0.640625, - -0.44921875, - 0.49414062, - 0.29101562, - 0.49609375, - 0.40429688, - -0.10205078, - 0.49414062, - -0.28125, - -0.12695312, - -0.0022735596, - -0.37304688, - 0.122558594, - 0.07519531, - -0.12597656, - -0.38085938, - -0.19824219, - -0.40039062, - 0.56640625, - -1.140625, - -0.515625, - -0.17578125, - -0.765625, - -0.43945312, - 0.3359375, - -0.24707031, - 0.32617188, - -0.45117188, - -0.37109375, - 0.45117188, - -0.27539062, - -0.38867188, - 0.09082031, - 0.17675781, - 0.49414062, - 0.19921875, - 0.17480469, - 0.8515625, - -0.23046875, - -0.234375, - -0.28515625, - 0.10253906, - 0.29101562, - -0.3359375, - -0.203125, - 0.6484375, - 0.11767578, - -0.20214844, - -0.42382812, - 0.26367188, - 0.6328125, - 0.0059509277, - 0.08691406, - -1.5625, - -0.43554688, - 0.17675781, - 0.091796875, - -0.5234375, - -0.09863281, - 0.20605469, - 0.16601562, - -0.578125, - 0.017700195, - 0.41015625, - 1.03125, - -0.55078125, - 0.21289062, - -0.35351562, - 0.24316406, - -0.123535156, - 0.11035156, - -0.48242188, - -0.34179688, - 0.45117188, - 0.3125, - -0.071777344, - 0.12792969, - 0.55859375, - 0.063964844, - -0.21191406, - 0.01965332, - -1.359375, - -0.21582031, - -0.019042969, - 0.16308594, - -0.3671875, - -0.40625, - -1.0234375, - -0.21289062, - 0.24023438, - -0.28125, - 0.26953125, - -0.14550781, - -0.087890625, - 0.16113281, - -0.49804688, - -0.17675781, - -0.890625, - 0.27929688, - 0.484375, - 0.27148438, - 0.11816406, - 0.83984375, - 0.029052734, - -0.890625, - 0.66796875, - 0.78515625, - -0.953125, - 0.49414062, - -0.546875, - 0.106933594, - -0.08251953, - 0.2890625, - -0.1484375, - -0.85546875, - 0.32421875, - -0.0040893555, - -0.16601562, - -0.16699219, - 0.24414062, - -0.5078125, - 0.25390625, - 
-0.10253906, - 0.15625, - 0.140625, - -0.27539062, - -0.546875, - -0.5546875, - -0.71875, - 0.37304688, - 0.060058594, - -0.076171875, - 0.44921875, - 0.06933594, - -0.28710938, - -0.22949219, - 0.17578125, - 0.09814453, - 0.4765625, - -0.95703125, - -0.03540039, - 0.21289062, - -0.7578125, - -0.07373047, - 0.10546875, - 0.07128906, - 0.76171875, - 0.4296875, - -0.09375, - 0.27539062, - -0.55078125, - 0.29882812, - -0.42382812, - 0.32617188, - -0.39648438, - 0.12451172, - 0.16503906, - -0.22460938, - -0.65625, - -0.022094727, - 0.61328125, - -0.024780273, - 0.62109375, - -0.033447266, - 0.515625, - 0.12890625, - -0.21875, - -0.08642578, - 0.49804688, - -0.2265625, - -0.29296875, - 0.19238281, - 0.3515625, - -1.265625, - 0.57421875, - 0.20117188, - -0.28320312, - 0.1953125, - -0.30664062, - 0.2265625, - -0.11230469, - 0.83984375, - 0.111328125, - 0.265625, - 0.71484375, - -0.625, - 0.38867188, - 0.47070312, - -0.32617188, - -0.171875, - 1.0078125, - 0.19726562, - -0.118652344, - 0.63671875, - -0.068359375, - -0.25585938, - 0.4140625, - -0.29296875, - 0.21386719, - -0.064453125, - 0.15820312, - -0.89453125, - -0.16308594, - 0.48046875, - 0.14648438, - -0.5703125, - 0.84765625, - -0.19042969, - 0.03515625, - 0.42578125, - -0.27539062, - -0.5390625, - 0.95703125, - 0.2734375, - 0.16699219, - -0.328125, - 0.11279297, - 0.003250122, - 0.47265625, - -0.31640625, - 0.546875, - 0.55859375, - 0.06933594, - -0.61328125, - -0.16210938, - -0.375, - 0.100097656, - -0.088378906, - 0.12695312, - 0.079589844, - 0.123535156, - -1.0078125, - 0.6875, - 0.022949219, - -0.40039062, - -0.09863281, - 0.29101562, - -1.2890625, - -0.20996094, - 0.36328125, - -0.3515625, - 0.7890625, - 0.12207031, - 0.48046875, - -0.13671875, - -0.041015625, - 0.19824219, - 0.19921875, - 0.01171875, - -0.37695312, - -0.62890625, - 0.9375, - -0.671875, - 0.24609375, - 0.6484375, - -0.29101562, - 0.076171875, - 0.62109375, - -0.5546875, - 0.36523438, - 0.75390625, - -0.19140625, - -0.875, - -0.8203125, - 
-0.24414062, - -0.625, - 0.1796875, - -0.40039062, - 0.25390625, - -0.14550781, - -0.21679688, - -0.828125, - 0.3359375, - 0.43554688, - 0.55078125, - -0.44921875, - -0.28710938, - 0.24023438, - 0.18066406, - -0.6953125, - 0.020385742, - -0.11376953, - 0.13867188, - -0.92578125, - 0.33398438, - -0.328125, - 0.78125, - -0.45507812, - -0.07470703, - 0.34179688, - 0.07080078, - 0.76171875, - 0.37890625, - -0.10644531, - 0.90234375, - -0.21875, - -0.15917969, - -0.36132812, - 0.2109375, - -0.45703125, - -0.76953125, - 0.21289062, - 0.26367188, - 0.49804688, - 0.35742188, - -0.20019531, - 0.31054688, - 0.34179688, - 0.17089844, - -0.15429688, - 0.39648438, - -0.5859375, - 0.20996094, - -0.40039062, - 0.5703125, - -0.515625, - 0.5234375, - 0.049560547, - 0.328125, - 0.24804688, - 0.42578125, - 0.609375, - 0.19238281, - 0.27929688, - 0.19335938, - 0.78125, - -0.9921875, - 0.23925781, - -1.3828125, - -0.22949219, - -0.578125, - -0.13964844, - -0.17382812, - -0.011169434, - 0.26171875, - -0.73046875, - -1.4375, - 0.6953125, - -0.7421875, - 0.052246094, - 0.12207031, - 1.3046875, - 0.38867188, - 0.040283203, - -0.546875, - -0.0021514893, - 0.18457031, - -0.5546875, - -0.51171875, - -0.16308594, - -0.104003906, - -0.38867188, - -0.20996094, - -0.8984375, - 0.6015625, - -0.30078125, - -0.13769531, - 0.16113281, - 0.58203125, - -0.23730469, - -0.125, - -1.0234375, - 0.875, - -0.7109375, - 0.29101562, - 0.09667969, - -0.3203125, - -0.48046875, - 0.37890625, - 0.734375, - -0.28710938, - -0.29882812, - -0.05493164, - 0.34765625, - -0.84375, - 0.65625, - 0.578125, - -0.20019531, - 0.13769531, - 0.10058594, - -0.37109375, - 0.36523438, - -0.22167969, - 0.72265625, - ], - "inputTextTokenCount": 6, - }, - ], - "amazon.titan-text-express-v1::What is 212 degrees Fahrenheit converted to Celsius?": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "03524118-8d77-430f-9e08-63b5c03a40cf"}, - 200, - { - "inputTextTokenCount": 12, - "results": [ - { - "tokenCount": 75, - 
"outputText": "\nUse the formula,\n°C = (°F - 32) x 5/9\n= 212 x 5/9\n= 100 degrees Celsius\n212 degrees Fahrenheit is 100 degrees Celsius.", - "completionReason": "FINISH", - } - ], - }, - ], - "anthropic.claude-instant-v1::Human: What is 212 degrees Fahrenheit converted to Celsius? Assistant:": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18"}, - 200, - { - "completion": " Okay, here are the conversion steps:\n212 degrees Fahrenheit\n- Subtract 32 from 212 to get 180 (to convert from Fahrenheit to Celsius scale)\n- Multiply by 5/9 (because the formula is °C = (°F - 32) × 5/9)\n- 180 × 5/9 = 100\n\nSo 212 degrees Fahrenheit converted to Celsius is 100 degrees Celsius.", - "stop_reason": "stop_sequence", - "stop": "\n\nHuman:", - }, - ], - "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ - {"Content-Type": "application/json", "x-amzn-RequestId": "e77422c8-fbbf-4e17-afeb-c758425c9f97"}, - 200, - { - "generations": [ - { - "finish_reason": "MAX_TOKENS", - "id": "d20c06b0-aafe-4230-b2c7-200f4069355e", - "text": " 212°F is equivalent to 100°C. \n\nFahrenheit and Celsius are two temperature scales commonly used in everyday life. The Fahrenheit scale is based on 32°F for the freezing point of water and 212°F for the boiling point of water. On the other hand, the Celsius scale uses 0°C and 100°C as the freezing and boiling points of water, respectively. 
\n\nTo convert from Fahrenheit to Celsius, we subtract 32 from the Fahrenheit temperature and multiply the result", - } - ], - "id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", - "prompt": "What is 212 degrees Fahrenheit converted to Celsius?", - }, - ], - "does-not-exist::": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "f4908827-3db9-4742-9103-2bbc34578b03", - "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", - }, - 400, - {"message": "The provided model identifier is invalid."}, - ], - "ai21.j2-mid-v1::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "9021791d-3797-493d-9277-e33aa6f6d544", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "amazon.titan-embed-g1-text-02::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "73328313-506e-4da8-af0f-51017fa6ca3f", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "amazon.titan-embed-text-v1::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "aece6ad7-e2ff-443b-a953-ba7d385fd0cc", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "amazon.titan-text-express-v1::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "15b39c8b-8e85-42c9-9623-06720301bda3", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - 
"anthropic.claude-instant-v1::Human: Invalid Token Assistant:": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "37396f55-b721-4bae-9461-4c369f5a080d", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], - "cohere.command-text-v14::Invalid Token": [ - { - "Content-Type": "application/json", - "x-amzn-RequestId": "22476490-a0d6-42db-b5ea-32d0b8a7f751", - "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", - }, - 403, - {"message": "The security token included in the request is invalid."}, - ], -} - -MODEL_PATH_RE = re.compile(r"/model/([^/]+)/invoke") - - -def simple_get(self): - content_len = int(self.headers.get("content-length")) - content = json.loads(self.rfile.read(content_len).decode("utf-8")) - - model = MODEL_PATH_RE.match(self.path).group(1) - prompt = extract_shortened_prompt(content, model) - if not prompt: - self.send_response(500) - self.end_headers() - self.wfile.write("Could not parse prompt.".encode("utf-8")) - return - - headers, response = ({}, "") - for k, v in RESPONSES.items(): - if prompt.startswith(k): - headers, status_code, response = v - break - else: # If no matches found - self.send_response(500) - self.end_headers() - self.wfile.write(("Unknown Prompt:\n%s" % prompt).encode("utf-8")) - return - - # Send response code - self.send_response(status_code) - - # Send headers - for k, v in headers.items(): - self.send_header(k, v) - self.end_headers() - - # Send response body - self.wfile.write(json.dumps(response).encode("utf-8")) - return - - -def extract_shortened_prompt(content, model): - prompt = content.get("inputText", "") or content.get("prompt", "") - prompt = "::".join((model, prompt)) # Prepend model name to prompt key to keep separate copies - return prompt.lstrip().split("\n")[0] - - -class 
MockExternalBedrockServer(MockExternalHTTPServer): - # To use this class in a test one needs to start and stop this server - # before and after making requests to the test app that makes the external - # calls. - - def __init__(self, handler=simple_get, port=None, *args, **kwargs): - super(MockExternalBedrockServer, self).__init__(handler=handler, port=port, *args, **kwargs) - - -if __name__ == "__main__": - # Use this to sort dict for easier future incremental updates - print("RESPONSES = %s" % dict(sorted(RESPONSES.items(), key=lambda i: (i[1][1], i[0])))) - - with MockExternalBedrockServer() as server: - print("MockExternalBedrockServer serving on port %s" % str(server.port)) - while True: - pass # Serve forever diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py deleted file mode 100644 index 1a66d74e43..0000000000 --- a/tests/external_botocore/_test_bedrock_chat_completion.py +++ /dev/null @@ -1,317 +0,0 @@ -chat_completion_payload_templates = { - "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', - "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', - "anthropic.claude-instant-v1": '{"prompt": "Human: %s Assistant:", "temperature": %f, "max_tokens_to_sample": %d}', - "cohere.command-text-v14": '{"prompt": "%s", "temperature": %f, "max_tokens": %d}', -} - -chat_completion_expected_events = { - "amazon.titan-text-express-v1": [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "transaction_id": None, - "span_id": "span-id", - "trace_id": "trace-id", - "request_id": "03524118-8d77-430f-9e08-63b5c03a40cf", - "api_key_last_four_digits": "CRET", - "duration": None, # Response time varies each test run - "request.model": "amazon.titan-text-express-v1", - 
"response.model": "amazon.titan-text-express-v1", - "response.usage.completion_tokens": 75, - "response.usage.total_tokens": 87, - "response.usage.prompt_tokens": 12, - "request.temperature": 0.7, - "request.max_tokens": 100, - "response.choices.finish_reason": "FINISH", - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 2, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "03524118-8d77-430f-9e08-63b5c03a40cf", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": "What is 212 degrees Fahrenheit converted to Celsius?", - "role": "user", - "completion_id": None, - "sequence": 0, - "response.model": "amazon.titan-text-express-v1", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "03524118-8d77-430f-9e08-63b5c03a40cf", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": "\nUse the formula,\n°C = (°F - 32) x 5/9\n= 212 x 5/9\n= 100 degrees Celsius\n212 degrees Fahrenheit is 100 degrees Celsius.", - "role": "assistant", - "completion_id": None, - "sequence": 1, - "response.model": "amazon.titan-text-express-v1", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ], - "ai21.j2-mid-v1": [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "transaction_id": None, - "span_id": "span-id", - "trace_id": "trace-id", - "request_id": "c863d9fc-888b-421c-a175-ac5256baec62", - "response_id": "1234", - "api_key_last_four_digits": "CRET", - "duration": None, 
# Response time varies each test run - "request.model": "ai21.j2-mid-v1", - "response.model": "ai21.j2-mid-v1", - "request.temperature": 0.7, - "request.max_tokens": 100, - "response.choices.finish_reason": "endoftext", - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 2, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": "1234-0", - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "c863d9fc-888b-421c-a175-ac5256baec62", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": "What is 212 degrees Fahrenheit converted to Celsius?", - "role": "user", - "completion_id": None, - "sequence": 0, - "response.model": "ai21.j2-mid-v1", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": "1234-1", - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "c863d9fc-888b-421c-a175-ac5256baec62", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": "\n212 degrees Fahrenheit is equal to 100 degrees Celsius.", - "role": "assistant", - "completion_id": None, - "sequence": 1, - "response.model": "ai21.j2-mid-v1", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ], - "anthropic.claude-instant-v1": [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "transaction_id": None, - "span_id": "span-id", - "trace_id": "trace-id", - "request_id": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18", - "api_key_last_four_digits": "CRET", - "duration": None, # Response time varies each test run - "request.model": "anthropic.claude-instant-v1", - "response.model": "anthropic.claude-instant-v1", - "request.temperature": 0.7, - "request.max_tokens": 100, - 
"response.choices.finish_reason": "stop_sequence", - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 2, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": "Human: What is 212 degrees Fahrenheit converted to Celsius? Assistant:", - "role": "user", - "completion_id": None, - "sequence": 0, - "response.model": "anthropic.claude-instant-v1", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "7b0b37c6-85fb-4664-8f5b-361ca7b1aa18", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": " Okay, here are the conversion steps:\n212 degrees Fahrenheit\n- Subtract 32 from 212 to get 180 (to convert from Fahrenheit to Celsius scale)\n- Multiply by 5/9 (because the formula is °C = (°F - 32) × 5/9)\n- 180 × 5/9 = 100\n\nSo 212 degrees Fahrenheit c", - "role": "assistant", - "completion_id": None, - "sequence": 1, - "response.model": "anthropic.claude-instant-v1", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ], - "cohere.command-text-v14": [ - ( - {"type": "LlmChatCompletionSummary"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "transaction_id": None, - "span_id": "span-id", - "trace_id": "trace-id", - "request_id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", - "response_id": None, # UUID that varies with each run - "api_key_last_four_digits": "CRET", - "duration": None, # Response time varies each 
test run - "request.model": "cohere.command-text-v14", - "response.model": "cohere.command-text-v14", - "request.temperature": 0.7, - "request.max_tokens": 100, - "response.choices.finish_reason": "MAX_TOKENS", - "vendor": "bedrock", - "ingest_source": "Python", - "response.number_of_messages": 2, - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": "What is 212 degrees Fahrenheit converted to Celsius?", - "role": "user", - "completion_id": None, - "sequence": 0, - "response.model": "cohere.command-text-v14", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ( - {"type": "LlmChatCompletionMessage"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "conversation_id": "my-awesome-id", - "request_id": "e77422c8-fbbf-4e17-afeb-c758425c9f97", - "span_id": "span-id", - "trace_id": "trace-id", - "transaction_id": None, - "content": " 212°F is equivalent to 100°C. \n\nFahrenheit and Celsius are two temperature scales commonly used in everyday life. The Fahrenheit scale is based on 32°F for the freezing point of water and 212°F for the boiling point of water. 
On the other hand, the C", - "role": "assistant", - "completion_id": None, - "sequence": 1, - "response.model": "cohere.command-text-v14", - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ], -} - -chat_completion_expected_client_errors = { - "amazon.titan-text-express-v1": { - "conversation_id": "my-awesome-id", - "request_id": "15b39c8b-8e85-42c9-9623-06720301bda3", - "api_key_last_four_digits": "-KEY", - "request.model": "amazon.titan-text-express-v1", - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", - }, - "ai21.j2-mid-v1": { - "conversation_id": "my-awesome-id", - "request_id": "9021791d-3797-493d-9277-e33aa6f6d544", - "api_key_last_four_digits": "-KEY", - "request.model": "ai21.j2-mid-v1", - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", - }, - "anthropic.claude-instant-v1": { - "conversation_id": "my-awesome-id", - "request_id": "37396f55-b721-4bae-9461-4c369f5a080d", - "api_key_last_four_digits": "-KEY", - "request.model": "anthropic.claude-instant-v1", - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", - }, - "cohere.command-text-v14": { - "conversation_id": "my-awesome-id", - "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", - "api_key_last_four_digits": "-KEY", - "request.model": "cohere.command-text-v14", - "request.temperature": 0.7, - "request.max_tokens": 100, - "vendor": "Bedrock", - "ingest_source": "Python", 
- "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", - }, -} diff --git a/tests/external_botocore/_test_bedrock_embeddings.py b/tests/external_botocore/_test_bedrock_embeddings.py deleted file mode 100644 index 8fb2ceecee..0000000000 --- a/tests/external_botocore/_test_bedrock_embeddings.py +++ /dev/null @@ -1,74 +0,0 @@ -embedding_payload_templates = { - "amazon.titan-embed-text-v1": '{ "inputText": "%s" }', - "amazon.titan-embed-g1-text-02": '{ "inputText": "%s" }', -} - -embedding_expected_events = { - "amazon.titan-embed-text-v1": [ - ( - {"type": "LlmEmbedding"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "transaction_id": None, - "span_id": "span-id", - "trace_id": "trace-id", - "input": "This is an embedding test.", - "api_key_last_four_digits": "CRET", - "duration": None, # Response time varies each test run - "response.model": "amazon.titan-embed-text-v1", - "request.model": "amazon.titan-embed-text-v1", - "request_id": "11233989-07e8-4ecb-9ba6-79601ba6d8cc", - "response.usage.total_tokens": 6, - "response.usage.prompt_tokens": 6, - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ], - "amazon.titan-embed-g1-text-02": [ - ( - {"type": "LlmEmbedding"}, - { - "id": None, # UUID that varies with each run - "appName": "Python Agent Test (external_botocore)", - "transaction_id": None, - "span_id": "span-id", - "trace_id": "trace-id", - "input": "This is an embedding test.", - "api_key_last_four_digits": "CRET", - "duration": None, # Response time varies each test run - "response.model": "amazon.titan-embed-g1-text-02", - "request.model": "amazon.titan-embed-g1-text-02", - "request_id": "b10ac895-eae3-4f07-b926-10b2866c55ed", - "response.usage.total_tokens": 6, - "response.usage.prompt_tokens": 6, - "vendor": "bedrock", - "ingest_source": "Python", - }, - ), - ] -} - 
-embedding_expected_client_errors = { - "amazon.titan-embed-text-v1": { - "request_id": "aece6ad7-e2ff-443b-a953-ba7d385fd0cc", - "api_key_last_four_digits": "-KEY", - "request.model": "amazon.titan-embed-text-v1", - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", - }, - "amazon.titan-embed-g1-text-02": { - "request_id": "73328313-506e-4da8-af0f-51017fa6ca3f", - "api_key_last_four_digits": "-KEY", - "request.model": "amazon.titan-embed-g1-text-02", - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 403, - "error.message": "The security token included in the request is invalid.", - "error.code": "UnrecognizedClientException", - }, -} diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index 8b19d3ce75..fb703c85e1 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -12,147 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json -import os -import re - -import pytest -from _mock_external_bedrock_server import ( - MockExternalBedrockServer, - extract_shortened_prompt, -) -from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 +from testing_support.fixtures import ( # noqa: F401; pylint: disable=W0611 collector_agent_registration_fixture, collector_available_fixture, ) -from newrelic.api.time_trace import current_trace -from newrelic.api.transaction import current_transaction -from newrelic.common.object_wrapper import wrap_function_wrapper -from newrelic.common.package_version_utils import get_package_version_tuple - _default_settings = { "transaction_tracer.explain_threshold": 0.0, "transaction_tracer.transaction_threshold": 0.0, "transaction_tracer.stack_trace_threshold": 0.0, "debug.log_data_collector_payloads": True, "debug.record_transaction_failure": True, - "ml_insights_events.enabled": True, } + collector_agent_registration = collector_agent_registration_fixture( - app_name="Python Agent Test (external_botocore)", - default_settings=_default_settings, - linked_applications=["Python Agent Test (external_botocore)"], + app_name="Python Agent Test (external_botocore)", default_settings=_default_settings ) - - -# Bedrock Fixtures - -BEDROCK_AUDIT_LOG_FILE = os.path.join(os.path.realpath(os.path.dirname(__file__)), "bedrock_audit.log") -BEDROCK_AUDIT_LOG_CONTENTS = {} - - -@pytest.fixture(scope="session") -def bedrock_server(): - """ - This fixture will either create a mocked backend for testing purposes, or will - set up an audit log file to log responses of the real Bedrock backend to a file. - The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 as - an environment variable to run using the real Bedrock backend. 
(Default: mocking) - """ - import boto3 - - from newrelic.core.config import _environ_as_bool - - if get_package_version_tuple("botocore") < (1, 31, 57): - pytest.skip(reason="Bedrock Runtime not available.") - - if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): - # Use mocked Bedrock backend and prerecorded responses - with MockExternalBedrockServer() as server: - client = boto3.client( # nosec - "bedrock-runtime", - "us-east-1", - endpoint_url="http://localhost:%d" % server.port, - aws_access_key_id="NOT-A-REAL-SECRET", - aws_secret_access_key="NOT-A-REAL-SECRET", - ) - - yield client - else: - # Use real Bedrock backend and record responses - assert ( - os.environ["AWS_ACCESS_KEY_ID"] and os.environ["AWS_SECRET_ACCESS_KEY"] - ), "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required." - - # Construct real client - client = boto3.client( - "bedrock-runtime", - "us-east-1", - ) - - # Apply function wrappers to record data - wrap_function_wrapper( - "botocore.endpoint", "Endpoint._do_get_response", wrap_botocore_endpoint_Endpoint__do_get_response - ) - yield client # Run tests - - # Write responses to audit log - bedrock_audit_log_contents = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: (i[1][1], i[0]))) - with open(BEDROCK_AUDIT_LOG_FILE, "w") as audit_log_fp: - json.dump(bedrock_audit_log_contents, fp=audit_log_fp, indent=4) - - -# Intercept outgoing requests and log to file for mocking -RECORDED_HEADERS = set(["x-amzn-requestid", "x-amzn-errortype", "content-type"]) - - -def wrap_botocore_endpoint_Endpoint__do_get_response(wrapped, instance, args, kwargs): - request = bind__do_get_response(*args, **kwargs) - if not request: - return wrapped(*args, **kwargs) - - body = json.loads(request.body) - - match = re.search(r"/model/([0-9a-zA-Z.-]+)/", request.url) - model = match.group(1) - prompt = extract_shortened_prompt(body, model) - - # Send request - result = wrapped(*args, **kwargs) - - # Unpack response - success, 
exception = result - response = (success or exception)[0] - - # Clean up data - data = json.loads(response.content.decode("utf-8")) - headers = dict(response.headers.items()) - headers = dict( - filter( - lambda k: k[0].lower() in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), - headers.items(), - ) - ) - status_code = response.status_code - - # Log response - BEDROCK_AUDIT_LOG_CONTENTS[prompt] = headers, status_code, data # Append response data to audit log - return result - - -def bind__do_get_response(request, operation_model, context): - return request - - -@pytest.fixture(scope="session") -def set_trace_info(): - def _set_trace_info(): - txn = current_transaction() - if txn: - txn._trace_id = "trace-id" - trace = current_trace() - if trace: - trace.guid = "span-id" - - return _set_trace_info diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion.py deleted file mode 100644 index a1eb881cc1..0000000000 --- a/tests/external_botocore/test_bedrock_chat_completion.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import botocore.exceptions - -import copy -import json -from io import BytesIO - -import pytest -from _test_bedrock_chat_completion import ( - chat_completion_expected_events, - chat_completion_payload_templates, - chat_completion_expected_client_errors, -) -from testing_support.fixtures import ( - dt_enabled, - override_application_settings, - reset_core_stats_engine, -) -from testing_support.validators.validate_ml_event_count import validate_ml_event_count -from testing_support.validators.validate_ml_events import validate_ml_events -from testing_support.validators.validate_span_events import validate_span_events -from testing_support.validators.validate_error_trace_attributes import ( - validate_error_trace_attributes, -) - -from newrelic.api.background_task import background_task -from newrelic.api.time_trace import current_trace -from newrelic.api.transaction import add_custom_attribute, current_transaction - -from newrelic.common.object_names import callable_name - -@pytest.fixture(scope="session", params=[False, True], ids=["Bytes", "Stream"]) -def is_file_payload(request): - return request.param - - -@pytest.fixture( - scope="module", - params=[ - "amazon.titan-text-express-v1", - "ai21.j2-mid-v1", - "anthropic.claude-instant-v1", - "cohere.command-text-v14", - ], -) -def model_id(request): - return request.param - - -@pytest.fixture(scope="module") -def exercise_model(bedrock_server, model_id, is_file_payload): - payload_template = chat_completion_payload_templates[model_id] - - def _exercise_model(prompt, temperature=0.7, max_tokens=100): - body = (payload_template % (prompt, temperature, max_tokens)).encode("utf-8") - if is_file_payload: - body = BytesIO(body) - - response = bedrock_server.invoke_model( - body=body, - modelId=model_id, - accept="application/json", - contentType="application/json", - ) - response_body = json.loads(response.get("body").read()) - assert response_body - - return _exercise_model - - -@pytest.fixture(scope="module") -def 
expected_events(model_id): - return chat_completion_expected_events[model_id] - - -@pytest.fixture(scope="module") -def expected_events_no_convo_id(model_id): - events = copy.deepcopy(chat_completion_expected_events[model_id]) - for event in events: - event[1]["conversation_id"] = "" - return events - - -@pytest.fixture(scope="module") -def expected_client_error(model_id): - return chat_completion_expected_client_errors[model_id] - - -_test_bedrock_chat_completion_prompt = "What is 212 degrees Fahrenheit converted to Celsius?" - - -@reset_core_stats_engine() -def test_bedrock_chat_completion_in_txn_with_convo_id(set_trace_info, exercise_model, expected_events): - @validate_ml_events(expected_events) - # One summary event, one user message, and one response message from the assistant - @validate_ml_event_count(count=3) - # @validate_transaction_metrics( - # name="test_bedrock_chat_completion_in_txn_with_convo_id", - # custom_metrics=[ - # ("Python/ML/OpenAI/%s" % openai.__version__, 1), - # ], - # background_task=True, - # ) - @background_task(name="test_bedrock_chat_completion_in_txn_with_convo_id") - def _test(): - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) - - _test() - - -@reset_core_stats_engine() -def test_bedrock_chat_completion_in_txn_no_convo_id(set_trace_info, exercise_model, expected_events_no_convo_id): - @validate_ml_events(expected_events_no_convo_id) - # One summary event, one user message, and one response message from the assistant - @validate_ml_event_count(count=3) - # @validate_transaction_metrics( - # name="test_bedrock_chat_completion_in_txn_no_convo_id", - # custom_metrics=[ - # ("Python/ML/OpenAI/%s" % openai.__version__, 1), - # ], - # background_task=True, - # ) - @background_task(name="test_bedrock_chat_completion_in_txn_no_convo_id") - def _test(): - set_trace_info() - 
exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) - - _test() - - -@reset_core_stats_engine() -@validate_ml_event_count(count=0) -def test_bedrock_chat_completion_outside_txn(set_trace_info, exercise_model): - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) - - -disabled_ml_settings = {"machine_learning.enabled": False, "ml_insights_events.enabled": False} - - -@override_application_settings(disabled_ml_settings) -@reset_core_stats_engine() -@validate_ml_event_count(count=0) -# @validate_transaction_metrics( -# name="test_bedrock_chat_completion_disabled_settings", -# custom_metrics=[ -# ("Python/ML/OpenAI/%s" % openai.__version__, 1), -# ], -# background_task=True, -# ) -@background_task(name="test_bedrock_chat_completion_disabled_settings") -def test_bedrock_chat_completion_disabled_settings(set_trace_info, exercise_model): - set_trace_info() - exercise_model(prompt=_test_bedrock_chat_completion_prompt, temperature=0.7, max_tokens=100) - - - -_client_error = botocore.exceptions.ClientError -_client_error_name = callable_name(_client_error) - - -@validate_error_trace_attributes( - "botocore.errorfactory:ValidationException", - exact_attrs={ - "agent": {}, - "intrinsic": {}, - "user": { - "conversation_id": "my-awesome-id", - "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", - "api_key_last_four_digits": "CRET", - "request.model": "does-not-exist", - "vendor": "Bedrock", - "ingest_source": "Python", - "http.statusCode": 400, - "error.message": "The provided model identifier is invalid.", - "error.code": "ValidationException", - }, - }, -) -@background_task() -def test_bedrock_chat_completion_error_invalid_model(bedrock_server, set_trace_info): - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - with pytest.raises(_client_error): - bedrock_server.invoke_model( - 
body=b"{}", - modelId="does-not-exist", - accept="application/json", - contentType="application/json", - ) - - -@dt_enabled -@reset_core_stats_engine() -def test_bedrock_chat_completion_error_incorrect_access_key(monkeypatch, bedrock_server, exercise_model, set_trace_info, expected_client_error): - @validate_error_trace_attributes( - _client_error_name, - exact_attrs={ - "agent": {}, - "intrinsic": {}, - "user": expected_client_error, - }, - ) - @background_task() - def _test(): - monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") - - with pytest.raises(_client_error): # not sure where this exception actually comes from - set_trace_info() - add_custom_attribute("conversation_id", "my-awesome-id") - exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100) - - _test() diff --git a/tests/external_botocore/test_bedrock_embeddings.py b/tests/external_botocore/test_bedrock_embeddings.py deleted file mode 100644 index c374dd69c5..0000000000 --- a/tests/external_botocore/test_bedrock_embeddings.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import botocore.exceptions - -import json -from io import BytesIO - -import pytest -from testing_support.fixtures import ( - dt_enabled, - override_application_settings, - reset_core_stats_engine, -) -from testing_support.validators.validate_ml_event_count import validate_ml_event_count -from testing_support.validators.validate_ml_events import validate_ml_events -from testing_support.validators.validate_transaction_metrics import ( - validate_transaction_metrics, -) -from testing_support.validators.validate_span_events import validate_span_events -from testing_support.validators.validate_error_trace_attributes import ( - validate_error_trace_attributes, -) - -from newrelic.api.background_task import background_task - -from _test_bedrock_embeddings import embedding_expected_events, embedding_payload_templates, embedding_expected_client_errors - -from newrelic.common.object_names import callable_name - - -disabled_ml_insights_settings = {"ml_insights_events.enabled": False} - - -@pytest.fixture(scope="session", params=[False, True], ids=["Bytes", "Stream"]) -def is_file_payload(request): - return request.param - - -@pytest.fixture( - scope="module", - params=[ - "amazon.titan-embed-text-v1", - "amazon.titan-embed-g1-text-02", - ], -) -def model_id(request): - return request.param - - -@pytest.fixture(scope="module") -def exercise_model(bedrock_server, model_id, is_file_payload): - payload_template = embedding_payload_templates[model_id] - - def _exercise_model(prompt, temperature=0.7, max_tokens=100): - body = (payload_template % prompt).encode("utf-8") - if is_file_payload: - body = BytesIO(body) - - response = bedrock_server.invoke_model( - body=body, - modelId=model_id, - accept="application/json", - contentType="application/json", - ) - response_body = json.loads(response.get("body").read()) - assert response_body - - return _exercise_model - - -@pytest.fixture(scope="module") -def expected_events(model_id): - return embedding_expected_events[model_id] - - 
-@pytest.fixture(scope="module") -def expected_client_error(model_id): - return embedding_expected_client_errors[model_id] - - -@reset_core_stats_engine() -def test_bedrock_embedding(set_trace_info, exercise_model, expected_events): - @validate_ml_events(expected_events) - @validate_ml_event_count(count=1) - # @validate_transaction_metrics( - # name="test_bedrock_embedding", - # custom_metrics=[ - # ("Python/ML/OpenAI/%s" % openai.__version__, 1), - # ], - # background_task=True, - # ) - @background_task(name="test_bedrock_embedding") - def _test(): - set_trace_info() - exercise_model(prompt="This is an embedding test.") - - _test() - - -@reset_core_stats_engine() -@validate_ml_event_count(count=0) -def test_bedrock_embedding_outside_txn(exercise_model): - exercise_model(prompt="This is an embedding test.") - - -_client_error = botocore.exceptions.ClientError -_client_error_name = callable_name(_client_error) - - -@override_application_settings(disabled_ml_insights_settings) -@reset_core_stats_engine() -@validate_ml_event_count(count=0) -# @validate_transaction_metrics( -# name="test_embeddings:test_bedrock_embedding_disabled_settings", -# custom_metrics=[ -# ("Python/ML/OpenAI/%s" % openai.__version__, 1), -# ], -# background_task=True, -# ) -@background_task() -def test_bedrock_embedding_disabled_settings(set_trace_info, exercise_model): - set_trace_info() - exercise_model(prompt="This is an embedding test.") - - -@dt_enabled -@reset_core_stats_engine() -def test_bedrock_embedding_error_incorrect_access_key(monkeypatch, bedrock_server, exercise_model, set_trace_info, expected_client_error): - @validate_error_trace_attributes( - _client_error_name, - exact_attrs={ - "agent": {}, - "intrinsic": {}, - "user": expected_client_error, - }, - ) - @background_task() - def _test(): - monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") - - with pytest.raises(_client_error): # not sure where this exception actually comes from 
- set_trace_info() - exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100) - - _test() From c0cb88b96f25192fe1ae9e2d5053f263a7c4691b Mon Sep 17 00:00:00 2001 From: hmstepanek Date: Sat, 11 Nov 2023 02:22:08 +0000 Subject: [PATCH 16/16] [Mega-Linter] Apply linters fixes --- newrelic/hooks/external_botocore.py | 1 - 1 file changed, 1 deletion(-) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index 30c5d77088..2f2b8a1134 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -84,7 +84,6 @@ def _nr_endpoint_make_request_(wrapped, instance, args, kwargs): method = request_dict.get("method", None) with ExternalTrace(library="botocore", url=url, method=method, source=wrapped) as trace: - try: trace._add_agent_attribute("aws.operation", operation_model.name) except: