diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 5ebdbcceaa..41613e1783 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -26,10 +26,8 @@ RUN mkdir -p ${HOME} && \ useradd --uid ${UID} --gid ${GID} --home ${HOME} vscode && \ chown -R ${UID}:${GID} /home/vscode -# Fix pyenv installation -RUN echo 'eval "$(pyenv init -)"' >>${HOME}/.bashrc && \ - chown -R vscode:vscode "${PYENV_ROOT}" && \ - chown -R vscode:vscode "${PIPX_HOME}" +# Allow uv to download Python versions manually +ENV UV_PYTHON_DOWNLOADS=manual # Set user USER ${UID}:${GID} diff --git a/.github/.trivyignore b/.github/.trivyignore index a3242f56c5..1f9f11bd30 100644 --- a/.github/.trivyignore +++ b/.github/.trivyignore @@ -2,7 +2,7 @@ # Ignored Vulnerabilities # ======================= -# Accepting risk due to Python 3.7 and 3.8 support. +# Accepting risk due to Python 3.8 support. CVE-2025-50181 # Not relevant, only affects Pyodide diff --git a/codecov.yml b/.github/codecov.yml similarity index 96% rename from codecov.yml rename to .github/codecov.yml index dbe9088f0b..0d9f7afa22 100644 --- a/codecov.yml +++ b/.github/codecov.yml @@ -23,7 +23,6 @@ ignore: - "newrelic/hooks/database_psycopg2ct.py" - "newrelic/hooks/datastore_aioredis.py" - "newrelic/hooks/datastore_aredis.py" - - "newrelic/hooks/datastore_motor.py" - "newrelic/hooks/datastore_pyelasticsearch.py" - "newrelic/hooks/external_dropbox.py" - "newrelic/hooks/external_facepy.py" diff --git a/.github/containers/Dockerfile b/.github/containers/Dockerfile index c9e22a93cb..207332f3c0 100644 --- a/.github/containers/Dockerfile +++ b/.github/containers/Dockerfile @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM ubuntu:20.04 +FROM ubuntu:24.04 ARG TARGETPLATFORM # Install OS packages @@ -103,36 +103,34 @@ RUN ln -fs "/usr/share/zoneinfo/${TZ}" /etc/localtime && \ ENV HOME=/root WORKDIR "${HOME}" -# Install pyenv -ENV PYENV_ROOT="/usr/local/pyenv" -RUN curl https://pyenv.run/ | /bin/bash -ENV PATH="${PYENV_ROOT}/bin:${PYENV_ROOT}/shims:${PATH}" -RUN echo 'eval "$(pyenv init -)"' >>${HOME}/.bashrc && \ - pyenv update +# Install and configure uv +RUN curl -LsSf https://astral.sh/uv/install.sh | sh +ENV PATH="${HOME}/.local/bin:${PATH}" +ENV UV_PYTHON_PREFERENCE="only-managed" +ENV UV_LINK_MODE="copy" -# Install Python -ARG PYTHON_VERSIONS="3.12 3.11 3.10 3.9 3.8 3.7 3.13 pypy3.10-7.3.17" -COPY --chown=0:0 --chmod=755 ./install-python.sh /tmp/install-python.sh -RUN /tmp/install-python.sh && \ - rm /tmp/install-python.sh +# Install PyPy versions and rename shims +RUN uv python install -f pp3.11 pp3.10 +RUN mv "${HOME}/.local/bin/python3.11" "${HOME}/.local/bin/pypy3.11" && \ + mv "${HOME}/.local/bin/python3.10" "${HOME}/.local/bin/pypy3.10" -# Install dependencies for main python installation -COPY ./requirements.txt /tmp/requirements.txt -RUN pyenv exec pip install --upgrade -r /tmp/requirements.txt && \ - rm /tmp/requirements.txt +# Install CPython versions +RUN uv python install -f cp3.14 cp3.13 cp3.12 cp3.11 cp3.10 cp3.9 cp3.8 -# Install tools with pipx in isolated environments -COPY ./requirements-tools.txt /tmp/requirements-tools.txt -ENV PIPX_HOME="/opt/pipx" -ENV PIPX_BIN_DIR="${PIPX_HOME}/bin" -ENV PATH="${PIPX_BIN_DIR}:${PATH}" -RUN mkdir -p "${PIPX_BIN_DIR}" && \ - while IFS="" read -r line || [ -n "$line" ]; do \ - pyenv exec pipx install --global "${line}"; \ - done &2 - exit 1 - fi - - # Find all latest pyenv supported versions for requested python versions - PYENV_VERSIONS=() - for v in "${PYTHON_VERSIONS[@]}"; do - LATEST=$(pyenv latest -k "$v" || pyenv latest -k "$v-dev") - if [[ -z "$LATEST" ]]; then - echo "Latest version could not be found for ${v}." 
1>&2 - exit 1 - fi - PYENV_VERSIONS+=($LATEST) - done - - # Install each specific version - for v in "${PYENV_VERSIONS[@]}"; do - pyenv install "$v" & - done - wait - - # Set all installed versions as globally accessible - pyenv global ${PYENV_VERSIONS[@]} -} - -main diff --git a/.github/containers/requirements-tools.txt b/.github/containers/requirements-tools.txt deleted file mode 100644 index 9b3032f8bb..0000000000 --- a/.github/containers/requirements-tools.txt +++ /dev/null @@ -1,2 +0,0 @@ -ruff -tox==4.23.2 \ No newline at end of file diff --git a/.github/containers/requirements-tox.txt b/.github/containers/requirements-tox.txt deleted file mode 100644 index 97fec6c257..0000000000 --- a/.github/containers/requirements-tox.txt +++ /dev/null @@ -1,2 +0,0 @@ -virtualenv==20.26.6 -pip==24.0 \ No newline at end of file diff --git a/.github/containers/requirements.txt b/.github/containers/requirements.txt deleted file mode 100644 index 3cb3f65a73..0000000000 --- a/.github/containers/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -pip==24.0 -pipx -setuptools -virtualenv==20.26.6 -wheel \ No newline at end of file diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 0000000000..b2718cb9d7 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,70 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +name: Benchmarks + +on: + pull_request: + +permissions: + contents: read + +concurrency: + group: ${{ github.ref || github.run_id }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + # Benchmarks + benchmark: + runs-on: ubuntu-24.04 + timeout-minutes: 30 + strategy: + matrix: + python: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] + + env: + ASV_FACTOR: "1.1" + BASE_SHA: ${{ github.event.pull_request.base.sha }} + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0 + with: + fetch-depth: 0 + + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0 + with: + python-version: "${{ matrix.python }}" + + - name: Fetch git tags + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + git fetch --tags origin + + - name: Install Dependencies + run: | + pip install --upgrade pip + pip install asv virtualenv + + - name: Configure Machine Information + run: | + asv machine --yes + + - name: Run Benchmark + run: | + asv continuous \ + --show-stderr \ + --split \ + --factor "${ASV_FACTOR}" \ + --python=${{ matrix.python }} \ + "${BASE_SHA}" "${GITHUB_SHA}" diff --git a/.github/workflows/build-ci-image.yml b/.github/workflows/build-ci-image.yml index 147f633f7a..8d56ad35c9 100644 --- a/.github/workflows/build-ci-image.yml +++ b/.github/workflows/build-ci-image.yml @@ -75,7 +75,7 @@ jobs: - name: Login to GitHub Container Registry if: github.event_name != 'pull_request' - uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # 3.5.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # 3.6.0 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -89,8 +89,6 @@ jobs: platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} outputs: type=image,name=ghcr.io/${{ steps.image-name.outputs.IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true - cache-from: type=gha,scope=build-${{ matrix.cache_tag }} - cache-to: 
type=gha,scope=build-${{ matrix.cache_tag }} - name: Export Digest run: | @@ -124,7 +122,7 @@ jobs: - name: Login to GitHub Container Registry if: github.event_name != 'pull_request' - uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # 3.5.0 + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # 3.6.0 with: registry: ghcr.io username: ${{ github.repository_owner }} diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 5ac5841135..6f6f8c2836 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -23,51 +23,6 @@ permissions: contents: read jobs: - build-wheels-legacy: - strategy: - fail-fast: false - matrix: - include: - # Linux glibc - - wheel: cp37-manylinux - os: ubuntu-24.04 - # Linux musllibc - - wheel: cp37-musllinux - os: ubuntu-24.04 - - name: Build wheels for ${{ matrix.wheel }} - runs-on: ${{ matrix.os }} - - steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Setup QEMU - if: runner.os == 'Linux' - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # 3.6.0 - with: - platforms: arm64 - - - name: Build Wheels - uses: pypa/cibuildwheel@8d945475ac4b1aac4ae08b2fd27db9917158b6ce # 2.17.0 - env: - CIBW_PLATFORM: auto - CIBW_BUILD: "${{ matrix.wheel }}*" - CIBW_ARCHS_LINUX: x86_64 aarch64 - CIBW_ENVIRONMENT_LINUX: "LD_LIBRARY_PATH=/opt/rh/devtoolset-8/root/usr/lib64:/opt/rh/devtoolset-8/root/usr/lib:/opt/rh/devtoolset-8/root/usr/lib64/dyninst:/opt/rh/devtoolset-8/root/usr/lib/dyninst:/usr/local/lib64:/usr/local/lib" - CIBW_TEST_REQUIRES: pytest - CIBW_TEST_COMMAND_LINUX: "export PYTHONPATH={project}/tests; pytest {project}/tests/agent_unittests -vx" - - - name: Upload Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 - with: - name: ${{ github.job }}-${{ matrix.wheel }} - path: ./wheelhouse/*.whl - if-no-files-found: error - 
retention-days: 1 - build-wheels: strategy: fail-fast: false @@ -152,7 +107,7 @@ jobs: persist-credentials: false fetch-depth: 0 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # 5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0 with: python-version: "3.13" @@ -189,7 +144,6 @@ jobs: id-token: write # IMPORTANT: this permission is mandatory for trusted publishing attestations: write needs: - - build-wheels-legacy - build-wheels - build-sdist @@ -226,19 +180,21 @@ jobs: - name: Upload Package to PyPI if: matrix.pypi-instance == 'pypi' - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # 1.12.4 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # 1.13.0 - name: Upload Package to TestPyPI if: matrix.pypi-instance == 'testpypi' - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # 1.12.4 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # 1.13.0 with: repository-url: https://test.pypi.org/legacy/ - name: Attest - uses: actions/attest-build-provenance@e8998f949152b193b063cb0ec769d69d929409be # 2.4.0 + uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # 3.0.0 id: attest with: - subject-path: ./dist/* + subject-path: | + ./dist/*.whl + ./dist/*.tar.gz - name: Wait for release to be available if: matrix.pypi-instance == 'pypi' diff --git a/.github/workflows/mega-linter.yml b/.github/workflows/mega-linter.yml index 3042624783..af88dcebbd 100644 --- a/.github/workflows/mega-linter.yml +++ b/.github/workflows/mega-linter.yml @@ -17,19 +17,16 @@ # More info at https://megalinter.io name: MegaLinter -on: - # Trigger mega-linter at every push. 
Action will also be visible from Pull Requests to main - # push: # Comment this line to trigger action only on pull-requests (not recommended if you don't pay for GH Actions) +on: # yamllint disable-line rule:truthy - false positive pull_request: permissions: contents: read -env: # Comment env block if you don't want to apply fixes - # Apply linter fixes configuration - APPLY_FIXES: all # When active, APPLY_FIXES must also be defined as environment variable (in github/workflows/mega-linter.yml or other CI tool) - APPLY_FIXES_EVENT: pull_request # Decide which event triggers application of fixes in a commit or a PR (pull_request, push, all) - APPLY_FIXES_MODE: commit # If APPLY_FIXES is used, defines if the fixes are directly committed (commit) or posted in a PR (pull_request) +env: + APPLY_FIXES: all + APPLY_FIXES_EVENT: pull_request + APPLY_FIXES_MODE: commit concurrency: group: ${{ github.ref || github.run_id }}-${{ github.workflow }} @@ -40,8 +37,8 @@ jobs: name: MegaLinter runs-on: ubuntu-24.04 permissions: - # Give the default GITHUB_TOKEN write permission to commit and push, comment issues & post new PR - # Remove the ones you do not need + # Give the default GITHUB_TOKEN write permission to commit and push, comment + # issues & post new PR; remove the ones you do not need contents: write issues: write pull-requests: write @@ -51,19 +48,19 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0 with: token: ${{ secrets.PAT || secrets.GITHUB_TOKEN }} + fetch-depth: 0 # Required for pushing commits to PRs # MegaLinter - name: MegaLinter id: ml - # You can override MegaLinter flavor used to have faster performances - # More info at https://megalinter.io/flavors/ - uses: oxsecurity/megalinter/flavors/python@e08c2b05e3dbc40af4c23f41172ef1e068a7d651 # 8.8.0 + uses: oxsecurity/megalinter/flavors/python@0dcbedd66ea456ba2d54fd350affaa15df8a0da3 # 9.0.1 env: # All available variables are described in documentation - # 
https://megalinter.io/configuration/ + # https://megalinter.io/latest/configuration/ VALIDATE_ALL_CODEBASE: "true" GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # ADD YOUR CUSTOM ENV VARIABLES HERE OR DEFINE THEM IN A FILE .mega-linter.yml AT THE ROOT OF YOUR REPOSITORY + # ADD YOUR CUSTOM ENV VARIABLES HERE OR DEFINE THEM IN A FILE + # .mega-linter.yml AT THE ROOT OF YOUR REPOSITORY GITHUB_COMMENT_REPORTER: "true" PYTHON_RUFF_ARGUMENTS: --config pyproject.toml --config 'output-format="github"' PYTHON_RUFF_FORMAT_ARGUMENTS: --config pyproject.toml --config 'output-format="github"' @@ -74,19 +71,53 @@ jobs: uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: MegaLinter reports + include-hidden-files: "true" path: | megalinter-reports mega-linter.log - # Push new commit if applicable (for now works only on PR from same repository, not from forks) + # Set APPLY_FIXES_IF var for use in future steps + - name: Set APPLY_FIXES_IF var + run: | + printf 'APPLY_FIXES_IF=%s\n' "${{ + steps.ml.outputs.has_updated_sources == 1 && + ( + env.APPLY_FIXES_EVENT == 'all' || + env.APPLY_FIXES_EVENT == github.event_name + ) && + ( + github.event_name == 'push' || + github.event.pull_request.head.repo.full_name == github.repository + ) + }}" >> "${GITHUB_ENV}" + + # Set APPLY_FIXES_IF_* vars for use in future steps + - name: Set APPLY_FIXES_IF_* vars + run: | + printf 'APPLY_FIXES_IF_PR=%s\n' "${{ + env.APPLY_FIXES_IF == 'true' && + env.APPLY_FIXES_MODE == 'pull_request' + }}" >> "${GITHUB_ENV}" + printf 'APPLY_FIXES_IF_COMMIT=%s\n' "${{ + env.APPLY_FIXES_IF == 'true' && + env.APPLY_FIXES_MODE == 'commit' && + (!contains(fromJSON('["refs/heads/main", "refs/heads/master"]'), github.ref)) + }}" >> "${GITHUB_ENV}" + - name: Prepare commit - if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/main' && 
(github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && !contains(github.event.head_commit.message, 'skip fix') + if: env.APPLY_FIXES_IF_COMMIT == 'true' run: sudo chown -Rc $UID .git/ + - name: Commit and push applied linter fixes - if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/main' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && !contains(github.event.head_commit.message, 'skip fix') uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # 6.0.1 + if: env.APPLY_FIXES_IF_COMMIT == 'true' with: - branch: ${{ github.event.pull_request.head.ref || github.head_ref || github.ref }} + branch: >- + ${{ + github.event.pull_request.head.ref || + github.head_ref || + github.ref + }} commit_message: "[MegaLinter] Apply linters fixes" commit_user_name: newrelic-python-agent-team commit_user_email: 137356142+newrelic-python-agent-team@users.noreply.github.com diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 9291711bfb..2333b273cc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -90,7 +90,7 @@ jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0 - - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # 5.6.0 + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0 with: python-version: "3.13" architecture: x64 @@ -108,7 +108,7 @@ jobs: coverage xml - name: Upload Coverage to Codecov - uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00 # 5.5.0 + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # 5.5.1 with: files: coverage.xml fail_ci_if_error: true diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 
f9bd9c4344..d5951057d6 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -39,7 +39,7 @@ jobs: - name: Run Trivy vulnerability scanner in repo mode if: ${{ github.event_name == 'pull_request' }} - uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4 # v0.32.0 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1 with: scan-type: "fs" ignore-unfixed: true @@ -50,7 +50,7 @@ jobs: - name: Run Trivy vulnerability scanner in repo mode if: ${{ github.event_name == 'schedule' }} - uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4 # v0.32.0 + uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1 with: scan-type: "fs" ignore-unfixed: true @@ -61,6 +61,6 @@ jobs: - name: Upload Trivy scan results to GitHub Security tab if: ${{ github.event_name == 'schedule' }} - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # 3.29.11 + uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # 3.30.5 with: sarif_file: "trivy-results.sarif" diff --git a/.gitignore b/.gitignore index d4550713fe..4acad5ce17 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,9 @@ # Linter megalinter-reports/ +# Benchmarks +.asv/ + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -32,7 +35,10 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST +_version.py version.txt +version.py +_version.py # PyInstaller # Usually these files are written by a python script from a template diff --git a/.mega-linter.yml b/.mega-linter.yml index 68027ec64a..ef6d98461c 100644 --- a/.mega-linter.yml +++ b/.mega-linter.yml @@ -20,6 +20,7 @@ DEFAULT_BRANCH: main # Usually master or main SHOW_ELAPSED_TIME: true FILEIO_REPORTER: false PRINT_ALPACA: false +FLAVOR_SUGGESTIONS: false CLEAR_REPORT_FOLDER: true VALIDATE_ALL_CODEBASE: true IGNORE_GITIGNORED_FILES: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 
53b3ac49ec..89c61da1be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ default_install_hook_types: repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.12.5 + rev: v0.13.1 hooks: # Run the linter. - id: ruff-check @@ -40,7 +40,7 @@ repos: stages: [pre-push] - repo: https://github.com/google/addlicense - rev: 55a521bf81c24480094950caa3566548fa63875e + rev: v1.2.0 hooks: - id: addlicense args: diff --git a/THIRD_PARTY_NOTICES.md b/THIRD_PARTY_NOTICES.md index 7aa68f22dd..2aceaea9fa 100644 --- a/THIRD_PARTY_NOTICES.md +++ b/THIRD_PARTY_NOTICES.md @@ -35,15 +35,6 @@ Distributed under the following license(s): * [The Apache License, Version 2.0 License](https://opensource.org/license/apache-2-0/) -## [time.monotonic](newrelic/common/_monotonic.c) - -Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Python Software Foundation; All Rights Reserved - -Distributed under the following license(s): - -* [Python Software Foundation](https://docs.python.org/3/license.html) - - ## [urllib3](https://pypi.org/project/urllib3) Copyright (c) 2008-2019 Andrey Petrov and contributors (see CONTRIBUTORS.txt) diff --git a/asv.conf.json b/asv.conf.json new file mode 100644 index 0000000000..5826289a5e --- /dev/null +++ b/asv.conf.json @@ -0,0 +1,100 @@ +{ + "version": 1, // The version of the config file format. + "project": "newrelic", + "project_url": "https://github.com/newrelic/newrelic-python-agent", + "show_commit_url": "https://github.com/newrelic/newrelic-python-agent/commit/", + "repo": ".", + "environment_type": "virtualenv", + "install_timeout": 120, + "pythons": ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"], + "benchmark_dir": "tests/agent_benchmarks", + "env_dir": ".asv/env", + "results_dir": ".asv/results", + "html_dir": ".asv/html", + "regressions_thresholds": { + ".*": 0.2, // Threshold of 20% + }, + + // The matrix of dependencies to test. 
Each key of the "req" + // requirements dictionary is the name of a package (in PyPI) and + // the values are version numbers. An empty list or empty string + // indicates to just test against the default (latest) + // version. null indicates that the package is to not be + // installed. If the package to be tested is only available from + // PyPi, and the 'environment_type' is conda, then you can preface + // the package name by 'pip+', and the package will be installed + // via pip (with all the conda available packages installed first, + // followed by the pip installed packages). + // + // The ``@env`` and ``@env_nobuild`` keys contain the matrix of + // environment variables to pass to build and benchmark commands. + // An environment will be created for every combination of the + // cartesian product of the "@env" variables in this matrix. + // Variables in "@env_nobuild" will be passed to every environment + // during the benchmark phase, but will not trigger creation of + // new environments. A value of ``null`` means that the variable + // will not be set for the current combination. + // + // "matrix": { + // "req": { + // "numpy": ["1.6", "1.7"], + // "six": ["", null], // test with and without six installed + // "pip+emcee": [""] // emcee is only available for install with pip. + // }, + // "env": {"ENV_VAR_1": ["val1", "val2"]}, + // "env_nobuild": {"ENV_VAR_2": ["val3", null]}, + // }, + + // Combinations of libraries/python versions can be excluded/included + // from the set to test. Each entry is a dictionary containing additional + // key-value pairs to include/exclude. + // + // An exclude entry excludes entries where all values match. The + // values are regexps that should match the whole string. + // + // An include entry adds an environment. Only the packages listed + // are installed. The 'python' key is required. The exclude rules + // do not apply to includes. 
+ // + // In addition to package names, the following keys are available: + // + // - python + // Python version, as in the *pythons* variable above. + // - environment_type + // Environment type, as above. + // - sys_platform + // Platform, as in sys.platform. Possible values for the common + // cases: 'linux2', 'win32', 'cygwin', 'darwin'. + // - req + // Required packages + // - env + // Environment variables + // - env_nobuild + // Non-build environment variables + // + // "exclude": [ + // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows + // {"environment_type": "conda", "req": {"six": null}}, // don't run without six on conda + // {"env": {"ENV_VAR_1": "val2"}}, // skip val2 for ENV_VAR_1 + // ], + // + // "include": [ + // // additional env for python3.12 + // {"python": "3.12", "req": {"numpy": "1.26"}, "env_nobuild": {"FOO": "123"}}, + // // additional env if run on windows+conda + // {"platform": "win32", "environment_type": "conda", "python": "3.12", "req": {"libpython": ""}}, + // ], + + // The commits after which the regression search in `asv publish` + // should start looking for regressions. Dictionary whose keys are + // regexps matching to benchmark names, and values corresponding to + // the commit (exclusive) after which to start looking for + // regressions. The default is to start from the first commit + // with results. If the commit is `null`, regression detection is + // skipped for the matching benchmark. + // + // "regressions_first_commits": { + // "some_benchmark": "352cdf", // Consider regressions only after this commit + // "another_benchmark": null, // Skip regression detection altogether + // }, +} diff --git a/newrelic/__init__.py b/newrelic/__init__.py index c27a5e98e7..00d91bfd95 100644 --- a/newrelic/__init__.py +++ b/newrelic/__init__.py @@ -12,14 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pathlib import Path - -VERSION_FILE = Path(__file__).parent / "version.txt" - try: - with VERSION_FILE.open() as f: - version = f.read() -except Exception: - version = "0.0.0" + from newrelic._version import __version__, __version_tuple__, version, version_tuple +except ImportError: # pragma: no cover + __version__ = version = "0.0.0" # pragma: no cover + __version_tuple__ = version_tuple = (0, 0, 0) # pragma: no cover -version_info = list(map(int, version.split("."))) +# Older compatibility attribute +version_info = version_tuple diff --git a/newrelic/agent.py b/newrelic/agent.py index 292a76fd2e..af98f03c55 100644 --- a/newrelic/agent.py +++ b/newrelic/agent.py @@ -15,62 +15,9 @@ from newrelic.api.application import application_instance as __application from newrelic.api.application import application_settings as __application_settings from newrelic.api.application import register_application as __register_application -from newrelic.api.log import NewRelicContextFormatter as __NewRelicContextFormatter -from newrelic.api.time_trace import add_custom_span_attribute as __add_custom_span_attribute -from newrelic.api.time_trace import current_trace as __current_trace -from newrelic.api.time_trace import get_linking_metadata as __get_linking_metadata -from newrelic.api.time_trace import notice_error as __notice_error -from newrelic.api.time_trace import record_exception as __record_exception -from newrelic.api.transaction import accept_distributed_trace_headers as __accept_distributed_trace_headers -from newrelic.api.transaction import accept_distributed_trace_payload as __accept_distributed_trace_payload -from newrelic.api.transaction import add_custom_attribute as __add_custom_attribute -from newrelic.api.transaction import add_custom_attributes as __add_custom_attributes -from newrelic.api.transaction import add_custom_parameter as __add_custom_parameter -from newrelic.api.transaction import add_custom_parameters as __add_custom_parameters -from 
newrelic.api.transaction import add_framework_info as __add_framework_info -from newrelic.api.transaction import capture_request_params as __capture_request_params -from newrelic.api.transaction import create_distributed_trace_payload as __create_distributed_trace_payload -from newrelic.api.transaction import current_span_id as __current_span_id -from newrelic.api.transaction import current_trace_id as __current_trace_id -from newrelic.api.transaction import current_transaction as __current_transaction -from newrelic.api.transaction import disable_browser_autorum as __disable_browser_autorum -from newrelic.api.transaction import end_of_transaction as __end_of_transaction -from newrelic.api.transaction import get_browser_timing_footer as __get_browser_timing_footer -from newrelic.api.transaction import get_browser_timing_header as __get_browser_timing_header -from newrelic.api.transaction import ignore_transaction as __ignore_transaction -from newrelic.api.transaction import insert_distributed_trace_headers as __insert_distributed_trace_headers -from newrelic.api.transaction import record_custom_event as __record_custom_event -from newrelic.api.transaction import record_custom_metric as __record_custom_metric -from newrelic.api.transaction import record_custom_metrics as __record_custom_metrics -from newrelic.api.transaction import record_log_event as __record_log_event -from newrelic.api.transaction import record_ml_event as __record_ml_event -from newrelic.api.transaction import set_background_task as __set_background_task -from newrelic.api.transaction import set_transaction_name as __set_transaction_name -from newrelic.api.transaction import suppress_apdex_metric as __suppress_apdex_metric -from newrelic.api.transaction import suppress_transaction_trace as __suppress_transaction_trace -from newrelic.api.wsgi_application import WSGIApplicationWrapper as __WSGIApplicationWrapper -from newrelic.api.wsgi_application import wrap_wsgi_application as 
__wrap_wsgi_application -from newrelic.api.wsgi_application import wsgi_application as __wsgi_application -from newrelic.config import extra_settings as __extra_settings -from newrelic.config import initialize as __initialize -from newrelic.core.agent import register_data_source as __register_data_source -from newrelic.core.agent import shutdown_agent as __shutdown_agent -from newrelic.core.config import global_settings as __global_settings -from newrelic.samplers.decorators import data_source_factory as __data_source_factory -from newrelic.samplers.decorators import data_source_generator as __data_source_generator - -try: - from newrelic.api.asgi_application import ASGIApplicationWrapper as __ASGIApplicationWrapper - from newrelic.api.asgi_application import asgi_application as __asgi_application - from newrelic.api.asgi_application import wrap_asgi_application as __wrap_asgi_application -except SyntaxError: - - def __asgi_application(*args, **kwargs): - pass - - __ASGIApplicationWrapper = __asgi_application - __wrap_asgi_application = __asgi_application - +from newrelic.api.asgi_application import ASGIApplicationWrapper as __ASGIApplicationWrapper +from newrelic.api.asgi_application import asgi_application as __asgi_application +from newrelic.api.asgi_application import wrap_asgi_application as __wrap_asgi_application from newrelic.api.background_task import BackgroundTask as __BackgroundTask from newrelic.api.background_task import BackgroundTaskWrapper as __BackgroundTaskWrapper from newrelic.api.background_task import background_task as __background_task @@ -101,9 +48,8 @@ def __asgi_application(*args, **kwargs): from newrelic.api.generator_trace import wrap_generator_trace as __wrap_generator_trace from newrelic.api.html_insertion import insert_html_snippet as __insert_html_snippet from newrelic.api.html_insertion import verify_body_exists as __verify_body_exists -from newrelic.api.lambda_handler import LambdaHandlerWrapper as __LambdaHandlerWrapper -from 
newrelic.api.lambda_handler import lambda_handler as __lambda_handler from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes as __WithLlmCustomAttributes +from newrelic.api.log import NewRelicContextFormatter as __NewRelicContextFormatter from newrelic.api.message_trace import MessageTrace as __MessageTrace from newrelic.api.message_trace import MessageTraceWrapper as __MessageTraceWrapper from newrelic.api.message_trace import message_trace as __message_trace @@ -120,7 +66,33 @@ def __asgi_application(*args, **kwargs): from newrelic.api.profile_trace import wrap_profile_trace as __wrap_profile_trace from newrelic.api.settings import set_error_group_callback as __set_error_group_callback from newrelic.api.supportability import wrap_api_call as __wrap_api_call +from newrelic.api.time_trace import add_custom_span_attribute as __add_custom_span_attribute +from newrelic.api.time_trace import current_trace as __current_trace +from newrelic.api.time_trace import get_linking_metadata as __get_linking_metadata +from newrelic.api.time_trace import notice_error as __notice_error +from newrelic.api.transaction import accept_distributed_trace_headers as __accept_distributed_trace_headers +from newrelic.api.transaction import add_custom_attribute as __add_custom_attribute +from newrelic.api.transaction import add_custom_attributes as __add_custom_attributes +from newrelic.api.transaction import add_framework_info as __add_framework_info +from newrelic.api.transaction import capture_request_params as __capture_request_params +from newrelic.api.transaction import current_span_id as __current_span_id +from newrelic.api.transaction import current_trace_id as __current_trace_id +from newrelic.api.transaction import current_transaction as __current_transaction +from newrelic.api.transaction import disable_browser_autorum as __disable_browser_autorum +from newrelic.api.transaction import end_of_transaction as __end_of_transaction +from newrelic.api.transaction import 
get_browser_timing_header as __get_browser_timing_header +from newrelic.api.transaction import ignore_transaction as __ignore_transaction +from newrelic.api.transaction import insert_distributed_trace_headers as __insert_distributed_trace_headers +from newrelic.api.transaction import record_custom_event as __record_custom_event +from newrelic.api.transaction import record_custom_metric as __record_custom_metric +from newrelic.api.transaction import record_custom_metrics as __record_custom_metrics +from newrelic.api.transaction import record_log_event as __record_log_event +from newrelic.api.transaction import record_ml_event as __record_ml_event +from newrelic.api.transaction import set_background_task as __set_background_task +from newrelic.api.transaction import set_transaction_name as __set_transaction_name from newrelic.api.transaction import set_user_id as __set_user_id +from newrelic.api.transaction import suppress_apdex_metric as __suppress_apdex_metric +from newrelic.api.transaction import suppress_transaction_trace as __suppress_transaction_trace from newrelic.api.transaction_name import TransactionNameWrapper as __TransactionNameWrapper from newrelic.api.transaction_name import transaction_name as __transaction_name from newrelic.api.transaction_name import wrap_transaction_name as __wrap_transaction_name @@ -128,12 +100,14 @@ def __asgi_application(*args, **kwargs): from newrelic.api.web_transaction import WebTransactionWrapper as __WebTransactionWrapper from newrelic.api.web_transaction import web_transaction as __web_transaction from newrelic.api.web_transaction import wrap_web_transaction as __wrap_web_transaction +from newrelic.api.wsgi_application import WSGIApplicationWrapper as __WSGIApplicationWrapper +from newrelic.api.wsgi_application import wrap_wsgi_application as __wrap_wsgi_application +from newrelic.api.wsgi_application import wsgi_application as __wsgi_application from newrelic.common.object_names import callable_name as __callable_name 
from newrelic.common.object_wrapper import CallableObjectProxy as __CallableObjectProxy from newrelic.common.object_wrapper import FunctionWrapper as __FunctionWrapper from newrelic.common.object_wrapper import InFunctionWrapper as __InFunctionWrapper from newrelic.common.object_wrapper import ObjectProxy as __ObjectProxy -from newrelic.common.object_wrapper import ObjectWrapper as __ObjectWrapper from newrelic.common.object_wrapper import OutFunctionWrapper as __OutFunctionWrapper from newrelic.common.object_wrapper import PostFunctionWrapper as __PostFunctionWrapper from newrelic.common.object_wrapper import PreFunctionWrapper as __PreFunctionWrapper @@ -152,6 +126,13 @@ def __asgi_application(*args, **kwargs): from newrelic.common.object_wrapper import wrap_out_function as __wrap_out_function from newrelic.common.object_wrapper import wrap_post_function as __wrap_post_function from newrelic.common.object_wrapper import wrap_pre_function as __wrap_pre_function +from newrelic.config import extra_settings as __extra_settings +from newrelic.config import initialize as __initialize +from newrelic.core.agent import register_data_source as __register_data_source +from newrelic.core.agent import shutdown_agent as __shutdown_agent +from newrelic.core.config import global_settings as __global_settings +from newrelic.samplers.decorators import data_source_factory as __data_source_factory +from newrelic.samplers.decorators import data_source_generator as __data_source_generator # EXPERIMENTAL - Generator traces are currently experimental and may not # exist in this form in future versions of the agent. 
@@ -178,15 +159,11 @@ def __asgi_application(*args, **kwargs): ignore_transaction = __wrap_api_call(__ignore_transaction, "ignore_transaction") suppress_apdex_metric = __wrap_api_call(__suppress_apdex_metric, "suppress_apdex_metric") capture_request_params = __wrap_api_call(__capture_request_params, "capture_request_params") -add_custom_parameter = __wrap_api_call(__add_custom_parameter, "add_custom_parameter") -add_custom_parameters = __wrap_api_call(__add_custom_parameters, "add_custom_parameters") add_custom_attribute = __wrap_api_call(__add_custom_attribute, "add_custom_attribute") add_custom_attributes = __wrap_api_call(__add_custom_attributes, "add_custom_attributes") add_framework_info = __wrap_api_call(__add_framework_info, "add_framework_info") -record_exception = __wrap_api_call(__record_exception, "record_exception") notice_error = __wrap_api_call(__notice_error, "notice_error") get_browser_timing_header = __wrap_api_call(__get_browser_timing_header, "get_browser_timing_header") -get_browser_timing_footer = __wrap_api_call(__get_browser_timing_footer, "get_browser_timing_footer") disable_browser_autorum = __wrap_api_call(__disable_browser_autorum, "disable_browser_autorum") suppress_transaction_trace = __wrap_api_call(__suppress_transaction_trace, "suppress_transaction_trace") record_custom_metric = __wrap_api_call(__record_custom_metric, "record_custom_metric") @@ -195,12 +172,6 @@ def __asgi_application(*args, **kwargs): record_log_event = __wrap_api_call(__record_log_event, "record_log_event") record_ml_event = __wrap_api_call(__record_ml_event, "record_ml_event") WithLlmCustomAttributes = __wrap_api_call(__WithLlmCustomAttributes, "WithLlmCustomAttributes") -accept_distributed_trace_payload = __wrap_api_call( - __accept_distributed_trace_payload, "accept_distributed_trace_payload" -) -create_distributed_trace_payload = __wrap_api_call( - __create_distributed_trace_payload, "create_distributed_trace_payload" -) accept_distributed_trace_headers = 
__wrap_api_call( __accept_distributed_trace_headers, "accept_distributed_trace_headers" ) @@ -223,8 +194,6 @@ def __asgi_application(*args, **kwargs): BackgroundTask = __wrap_api_call(__BackgroundTask, "BackgroundTask") BackgroundTaskWrapper = __wrap_api_call(__BackgroundTaskWrapper, "BackgroundTaskWrapper") wrap_background_task = __wrap_api_call(__wrap_background_task, "wrap_background_task") -LambdaHandlerWrapper = __wrap_api_call(__LambdaHandlerWrapper, "LambdaHandlerWrapper") -lambda_handler = __wrap_api_call(__lambda_handler, "lambda_handler") NewRelicContextFormatter = __wrap_api_call(__NewRelicContextFormatter, "NewRelicContextFormatter") transaction_name = __wrap_api_call(__transaction_name, "transaction_name") TransactionNameWrapper = __wrap_api_call(__TransactionNameWrapper, "TransactionNameWrapper") @@ -275,7 +244,6 @@ def __asgi_application(*args, **kwargs): function_wrapper = __wrap_api_call(__function_wrapper, "function_wrapper") wrap_function_wrapper = __wrap_api_call(__wrap_function_wrapper, "wrap_function_wrapper") patch_function_wrapper = __wrap_api_call(__patch_function_wrapper, "patch_function_wrapper") -ObjectWrapper = __wrap_api_call(__ObjectWrapper, "ObjectWrapper") pre_function = __wrap_api_call(__pre_function, "pre_function") PreFunctionWrapper = __wrap_api_call(__PreFunctionWrapper, "PreFunctionWrapper") wrap_pre_function = __wrap_api_call(__wrap_pre_function, "wrap_pre_function") diff --git a/newrelic/api/application.py b/newrelic/api/application.py index 46f38967e7..9aa6d7b6b8 100644 --- a/newrelic/api/application.py +++ b/newrelic/api/application.py @@ -13,7 +13,6 @@ # limitations under the License. 
import threading -import warnings import newrelic.api.import_hook import newrelic.core.agent @@ -110,16 +109,6 @@ def linked_applications(self): def link_to_application(self, name): self._linked[name] = True - def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_errors=None): - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) - def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None): if not self.active: return diff --git a/newrelic/api/datastore_trace.py b/newrelic/api/datastore_trace.py index d5a61ab18a..4d3a0db0ad 100644 --- a/newrelic/api/datastore_trace.py +++ b/newrelic/api/datastore_trace.py @@ -17,6 +17,7 @@ from newrelic.api.time_trace import TimeTrace, current_trace from newrelic.common.async_wrapper import async_wrapper as get_async_wrapper from newrelic.common.object_wrapper import FunctionWrapper, wrap_object +from newrelic.core.config import global_settings from newrelic.core.datastore_node import DatastoreNode @@ -86,7 +87,8 @@ def __enter__(self): self.port_path_or_id = transaction._intern_string(self.port_path_or_id) self.database_name = transaction._intern_string(self.database_name) - datastore_tracer_settings = transaction.settings.datastore_tracer + settings = transaction.settings or global_settings() + datastore_tracer_settings = settings.datastore_tracer self.instance_reporting_enabled = datastore_tracer_settings.instance_reporting.enabled self.database_name_enabled = datastore_tracer_settings.database_name_reporting.enabled return result diff --git a/newrelic/api/error_trace.py b/newrelic/api/error_trace.py index b67261d90a..db63c54316 100644 --- a/newrelic/api/error_trace.py +++ b/newrelic/api/error_trace.py @@ -13,31 +13,21 @@ # limitations under the 
License. import functools -import warnings from newrelic.api.time_trace import current_trace, notice_error from newrelic.common.object_wrapper import FunctionWrapper, wrap_object class ErrorTrace: - def __init__(self, ignore_errors=None, ignore=None, expected=None, status_code=None, parent=None): - if ignore_errors is None: - ignore_errors = [] + def __init__(self, ignore=None, expected=None, status_code=None, parent=None): if parent is None: parent = current_trace() self._transaction = parent and parent.transaction - self._ignore = ignore if ignore is not None else ignore_errors + self._ignore = ignore self._expected = expected self._status_code = status_code - if ignore_errors: - warnings.warn( - ("The ignore_errors argument is deprecated. Please use the new ignore argument instead."), - DeprecationWarning, - stacklevel=2, - ) - def __enter__(self): return self @@ -53,33 +43,22 @@ def __exit__(self, exc, value, tb): ) -def ErrorTraceWrapper(wrapped, ignore_errors=None, ignore=None, expected=None, status_code=None): - if ignore_errors is None: - ignore_errors = [] - +def ErrorTraceWrapper(wrapped, ignore=None, expected=None, status_code=None): def wrapper(wrapped, instance, args, kwargs): parent = current_trace() if parent is None: return wrapped(*args, **kwargs) - with ErrorTrace(ignore_errors, ignore, expected, status_code, parent=parent): + with ErrorTrace(ignore, expected, status_code, parent=parent): return wrapped(*args, **kwargs) return FunctionWrapper(wrapped, wrapper) -def error_trace(ignore_errors=None, ignore=None, expected=None, status_code=None): - if ignore_errors is None: - ignore_errors = [] - - return functools.partial( - ErrorTraceWrapper, ignore_errors=ignore_errors, ignore=ignore, expected=expected, status_code=status_code - ) - +def error_trace(ignore=None, expected=None, status_code=None): + return functools.partial(ErrorTraceWrapper, ignore=ignore, expected=expected, status_code=status_code) -def wrap_error_trace(module, object_path, 
ignore_errors=None, ignore=None, expected=None, status_code=None): - if ignore_errors is None: - ignore_errors = [] - wrap_object(module, object_path, ErrorTraceWrapper, (ignore_errors, ignore, expected, status_code)) +def wrap_error_trace(module, object_path, ignore=None, expected=None, status_code=None): + wrap_object(module, object_path, ErrorTraceWrapper, (ignore, expected, status_code)) diff --git a/newrelic/api/graphql_trace.py b/newrelic/api/graphql_trace.py index 8767d1a6e2..63b6e8c227 100644 --- a/newrelic/api/graphql_trace.py +++ b/newrelic/api/graphql_trace.py @@ -18,6 +18,7 @@ from newrelic.api.transaction import current_transaction from newrelic.common.async_wrapper import async_wrapper as get_async_wrapper from newrelic.common.object_wrapper import FunctionWrapper, wrap_object +from newrelic.core.config import global_settings from newrelic.core.graphql_node import GraphQLOperationNode, GraphQLResolverNode @@ -49,7 +50,7 @@ def formatted(self): transaction = current_transaction(active_only=False) # Record SQL settings - settings = transaction.settings + settings = transaction.settings or global_settings() tt = settings.transaction_tracer self.graphql_format = tt.record_sql diff --git a/newrelic/api/in_function.py b/newrelic/api/in_function.py deleted file mode 100644 index 88edb084bd..0000000000 --- a/newrelic/api/in_function.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Use of these from this module will be deprecated. - -from newrelic.common.object_wrapper import InFunctionWrapper, in_function, wrap_in_function # noqa: F401 diff --git a/newrelic/api/lambda_handler.py b/newrelic/api/lambda_handler.py deleted file mode 100644 index 97897c6ae9..0000000000 --- a/newrelic/api/lambda_handler.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import warnings - -from newrelic.api.application import application_instance -from newrelic.api.transaction import current_transaction -from newrelic.api.web_transaction import WebTransaction -from newrelic.common.object_wrapper import FunctionWrapper -from newrelic.core.attribute import truncate -from newrelic.core.config import global_settings - -COLD_START_RECORDED = False -MEGABYTE_IN_BYTES = 2**20 - - -def extract_event_source_arn(event): - try: - arn = event.get("streamArn") or event.get("deliveryStreamArn") - - if not arn: - record = event["Records"][0] - arn = record.get("eventSourceARN") or record.get("EventSubscriptionArn") or record["s3"]["bucket"]["arn"] - - return truncate(str(arn)) - except Exception: - pass - - -def _LambdaHandlerWrapper(wrapped, application=None, name=None, group=None): - def _nr_lambda_handler_wrapper_(wrapped, instance, args, kwargs): - # Check to see if any transaction is present, even an inactive - # one which has been marked to be ignored or which has been - # stopped 
already. - - transaction = current_transaction(active_only=False) - - if transaction: - return wrapped(*args, **kwargs) - - try: - event, context = args[:2] - except Exception: - return wrapped(*args, **kwargs) - - target_application = application - - # If application has an activate() method we assume it is an - # actual application. Do this rather than check type so that - # can easily mock it for testing. - - # FIXME Should this allow for multiple apps if a string. - - if not hasattr(application, "activate"): - target_application = application_instance(application) - - try: - request_method = event["httpMethod"] - request_path = event["path"] - headers = event["headers"] - query_params = event.get("multiValueQueryStringParameters") - background_task = False - except Exception: - request_method = None - request_path = None - headers = None - query_params = None - background_task = True - - transaction_name = name or getattr(context, "function_name", None) - - transaction = WebTransaction( - target_application, - transaction_name, - group=group, - request_method=request_method, - request_path=request_path, - headers=headers, - ) - - transaction.background_task = background_task - - request_id = getattr(context, "aws_request_id", None) - aws_arn = getattr(context, "invoked_function_arn", None) - event_source = extract_event_source_arn(event) - - if request_id: - transaction._add_agent_attribute("aws.requestId", request_id) - if aws_arn: - transaction._add_agent_attribute("aws.lambda.arn", aws_arn) - if event_source: - transaction._add_agent_attribute("aws.lambda.eventSource.arn", event_source) - - # COLD_START_RECORDED is initialized to "False" when the container - # first starts up, and will remain that way until the below lines - # of code are encountered during the first transaction after the cold - # start. 
We record this occurence on the transaction so that an - # attribute is created, and then set COLD_START_RECORDED to False so - # that the attribute is not created again during future invocations of - # this container. - - global COLD_START_RECORDED - if COLD_START_RECORDED is False: - transaction._add_agent_attribute("aws.lambda.coldStart", True) - COLD_START_RECORDED = True - - settings = global_settings() - if query_params and not settings.high_security: - try: - transaction._request_params.update(query_params) - except: - pass - - if not settings.aws_lambda_metadata and aws_arn: - settings.aws_lambda_metadata["arn"] = aws_arn - - with transaction: - result = wrapped(*args, **kwargs) - - if not background_task: - try: - status_code = result.get("statusCode") - response_headers = result.get("headers") - - try: - response_headers = response_headers.items() - except Exception: - response_headers = None - - transaction.process_response(status_code, response_headers) - except Exception: - pass - - return result - - return FunctionWrapper(wrapped, _nr_lambda_handler_wrapper_) - - -def LambdaHandlerWrapper(*args, **kwargs): - warnings.warn( - ( - "The LambdaHandlerWrapper API has been deprecated. Please use the " - "APIs provided in the newrelic-lambda package." - ), - DeprecationWarning, - stacklevel=2, - ) - - return _LambdaHandlerWrapper(*args, **kwargs) - - -def lambda_handler(application=None, name=None, group=None): - warnings.warn( - ("The lambda_handler API has been deprecated. 
Please use the APIs provided in the newrelic-lambda package."), - DeprecationWarning, - stacklevel=2, - ) - - return functools.partial(_LambdaHandlerWrapper, application=application, name=name, group=group) diff --git a/newrelic/api/log.py b/newrelic/api/log.py index eacb270de3..65c147a265 100644 --- a/newrelic/api/log.py +++ b/newrelic/api/log.py @@ -14,17 +14,13 @@ import json import logging -import re -import warnings from traceback import format_exception from newrelic.api.application import application_instance from newrelic.api.time_trace import get_linking_metadata from newrelic.api.transaction import current_transaction, record_log_event -from newrelic.common import agent_http from newrelic.common.encoding_utils import json_encode from newrelic.common.object_names import parse_exc_info -from newrelic.core.attribute import truncate from newrelic.core.config import global_settings, is_expected_error @@ -186,85 +182,3 @@ def emit(self, record): def filter_record_attributes(cls, record): record_attrs = vars(record) return {k: record_attrs[k] for k in record_attrs if k not in cls.IGNORED_LOG_RECORD_KEYS} - - -class NewRelicLogHandler(logging.Handler): - """ - Deprecated: Please use NewRelicLogForwardingHandler instead. - This is an experimental log handler provided by the community. Use with caution. - """ - - PATH = "/log/v1" - - def __init__( - self, - level=logging.INFO, - license_key=None, - host=None, - port=443, - proxy_scheme=None, - proxy_host=None, - proxy_user=None, - proxy_pass=None, - timeout=None, - ca_bundle_path=None, - disable_certificate_validation=False, - ): - warnings.warn( - "The contributed NewRelicLogHandler has been superseded by automatic instrumentation for " - "logging in the standard lib. 
If for some reason you need to manually configure a handler, " - "please use newrelic.api.log.NewRelicLogForwardingHandler to take advantage of all the " - "features included in application log forwarding such as proper batching.", - DeprecationWarning, - stacklevel=2, - ) - super().__init__(level=level) - self.license_key = license_key or self.settings.license_key - self.host = host or self.settings.host or self.default_host(self.license_key) - - self.client = agent_http.HttpClient( - host=host, - port=port, - proxy_scheme=proxy_scheme, - proxy_host=proxy_host, - proxy_user=proxy_user, - proxy_pass=proxy_pass, - timeout=timeout, - ca_bundle_path=ca_bundle_path, - disable_certificate_validation=disable_certificate_validation, - ) - - self.setFormatter(NewRelicContextFormatter()) - - @property - def settings(self): - transaction = current_transaction() - if transaction: - return transaction.settings - return global_settings() - - def emit(self, record): - try: - headers = {"Api-Key": self.license_key or "", "Content-Type": "application/json"} - payload = self.format(record).encode("utf-8") - with self.client: - status_code, response = self.client.send_request(path=self.PATH, headers=headers, payload=payload) - if status_code < 200 or status_code >= 300: - raise RuntimeError( - f"An unexpected HTTP response of {status_code!r} was received for request made to https://{self.client._host}:{int(self.client._port)}{self.PATH}.The response payload for the request was {truncate(response.decode('utf-8'), 1024)!r}. If this issue persists then please report this problem to New Relic support for further investigation." 
- ) - - except Exception: - self.handleError(record) - - def default_host(self, license_key): - if not license_key: - return "log-api.newrelic.com" - - region_aware_match = re.match("^(.+?)x", license_key) - if not region_aware_match: - return "log-api.newrelic.com" - - region = region_aware_match.group(1) - host = f"log-api.{region}.newrelic.com" - return host diff --git a/newrelic/api/object_wrapper.py b/newrelic/api/object_wrapper.py index 3f76d00e53..480940f476 100644 --- a/newrelic/api/object_wrapper.py +++ b/newrelic/api/object_wrapper.py @@ -12,12 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# These have been moved. They are retained here until all references to -# them are moved at which point will mark as deprecated to ensure users -# weren't using them directly. -from newrelic.common.object_names import callable_name # noqa: F401 -from newrelic.common.object_wrapper import ObjectWrapper, wrap_object # noqa: F401 - # From Python 3.X. In older Python versions it fails if attributes do # not exist and don't maintain a __wrapped__ attribute. diff --git a/newrelic/api/out_function.py b/newrelic/api/out_function.py deleted file mode 100644 index 7672cf93d8..0000000000 --- a/newrelic/api/out_function.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Use of these from this module will be deprecated. 
- -from newrelic.common.object_wrapper import OutFunctionWrapper, out_function, wrap_out_function # noqa: F401 diff --git a/newrelic/api/post_function.py b/newrelic/api/post_function.py deleted file mode 100644 index 436b15f98e..0000000000 --- a/newrelic/api/post_function.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Use of these from this module will be deprecated. - -from newrelic.common.object_wrapper import PostFunctionWrapper, post_function, wrap_post_function # noqa: F401 diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index bef6f04561..fd0f62fdef 100644 --- a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -18,7 +18,6 @@ import sys import time import traceback -import warnings from newrelic.api.settings import STRIP_EXCEPTION_MESSAGE from newrelic.common.object_names import parse_exc_info @@ -253,7 +252,7 @@ def _observe_exception(self, exc_info=None, ignore=None, expected=None, status_c if getattr(value, "_nr_ignored", None): return - module, name, fullnames, message_raw = parse_exc_info((exc, value, tb)) + _module, name, fullnames, message_raw = parse_exc_info((exc, value, tb)) fullname = fullnames[0] # In case message is in JSON format for OpenAI models @@ -449,16 +448,6 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, settings, fullname, message, is_expected, error_group_name, custom_params, self.guid, tb, 
source=source ) - def record_exception(self, exc_info=None, params=None, ignore_errors=None): - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - self.notice_error(error=exc_info, attributes=params, ignore=ignore_errors) - def _add_agent_attribute(self, key, value): self.agent_attributes[key] = value @@ -515,7 +504,7 @@ def _complete_trace(self): exc_data = self.exc_data self.exc_data = (None, None, None) - # Observe errors on the span only if record_exception hasn't been + # Observe errors on the span only if notice_error hasn't been # called already if exc_data[0] and "error.class" not in self.agent_attributes: self._observe_exception(exc_data) @@ -696,17 +685,6 @@ def get_linking_metadata(application=None): return metadata -def record_exception(exc=None, value=None, tb=None, params=None, ignore_errors=None, application=None): - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. 
Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors, application=application) - - def notice_error(error=None, attributes=None, expected=None, ignore=None, status_code=None, application=None): if application is None: trace = current_trace() diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index 28d3a07638..d856982a1b 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -19,7 +19,6 @@ import sys import threading import time -import warnings import weakref from collections import OrderedDict @@ -286,6 +285,7 @@ def __init__(self, application, enabled=None, source=None): self.tracestate = "" self._priority = None self._sampled = None + self._traceparent_sampled = None self._distributed_trace_state = 0 @@ -333,13 +333,9 @@ def __init__(self, application, enabled=None, source=None): self.enabled = True if self._settings: - self._custom_events = SampledDataSet( - capacity=self._settings.event_harvest_config.harvest_limits.custom_event_data - ) + self._custom_events = SampledDataSet(capacity=self._settings.custom_insights_events.max_samples_stored) self._ml_events = SampledDataSet(capacity=self._settings.event_harvest_config.harvest_limits.ml_event_data) - self._log_events = SampledDataSet( - capacity=self._settings.event_harvest_config.harvest_limits.log_event_data - ) + self._log_events = SampledDataSet(capacity=self._settings.application_logging.forwarding.max_samples_stored) else: self._custom_events = SampledDataSet(capacity=CUSTOM_EVENT_RESERVOIR_SIZE) self._log_events = SampledDataSet(capacity=LOG_EVENT_RESERVOIR_SIZE) @@ -1004,16 +1000,36 @@ def _update_agent_attributes(self): def user_attributes(self): return create_attributes(self._custom_params, DST_ALL, self.attribute_filter) - def _compute_sampled_and_priority(self): + def sampling_algo_compute_sampled_and_priority(self): if self._priority 
is None: - # truncate priority field to 6 digits past the decimal + # Truncate priority field to 6 digits past the decimal. self._priority = float(f"{random.random():.6f}") # noqa: S311 - if self._sampled is None: self._sampled = self._application.compute_sampled() if self._sampled: self._priority += 1 + def _compute_sampled_and_priority(self): + if self._traceparent_sampled is None: + config = "default" # Use sampling algo. + elif self._traceparent_sampled: + setting_path = "distributed_tracing.sampler.remote_parent_sampled" + config = self.settings.distributed_tracing.sampler.remote_parent_sampled + else: # self._traceparent_sampled is False. + setting_path = "distributed_tracing.sampler.remote_parent_not_sampled" + config = self.settings.distributed_tracing.sampler.remote_parent_not_sampled + + if config == "always_on": + self._sampled = True + self._priority = 2.0 + elif config == "always_off": + self._sampled = False + self._priority = 0 + else: + if config != "default": + _logger.warning("%s=%s is not a recognized value. Using 'default' instead.", setting_path, config) + self.sampling_algo_compute_sampled_and_priority() + def _freeze_path(self): if self._frozen_path is None: self._name_priority = None @@ -1104,29 +1120,6 @@ def _create_distributed_trace_data(self): return data - def _create_distributed_trace_payload(self): - try: - data = self._create_distributed_trace_data() - if data is None: - return - payload = DistributedTracePayload(v=DistributedTracePayload.version, d=data) - except: - self._record_supportability("Supportability/DistributedTrace/CreatePayload/Exception") - else: - self._record_supportability("Supportability/DistributedTrace/CreatePayload/Success") - return payload - - def create_distributed_trace_payload(self): - warnings.warn( - ( - "The create_distributed_trace_payload API has been deprecated. " - "Please use the insert_distributed_trace_headers API." 
- ), - DeprecationWarning, - stacklevel=2, - ) - return self._create_distributed_trace_payload() - def _generate_distributed_trace_headers(self, data=None): try: data = data or self._create_distributed_trace_data() @@ -1242,19 +1235,6 @@ def _accept_distributed_trace_payload(self, payload, transport_type="HTTP"): self._record_supportability("Supportability/DistributedTrace/AcceptPayload/Exception") return False - def accept_distributed_trace_payload(self, *args, **kwargs): - warnings.warn( - ( - "The accept_distributed_trace_payload API has been deprecated. " - "Please use the accept_distributed_trace_headers API." - ), - DeprecationWarning, - stacklevel=2, - ) - if not self._can_accept_distributed_trace_headers(): - return False - return self._accept_distributed_trace_payload(*args, **kwargs) - def _accept_distributed_trace_data(self, data, transport_type): if transport_type not in DISTRIBUTED_TRACE_TRANSPORT_TYPES: transport_type = "Unknown" @@ -1348,12 +1328,16 @@ def accept_distributed_trace_headers(self, headers, transport_type="HTTP"): else: self._record_supportability("Supportability/TraceContext/TraceState/NoNrEntry") + self._traceparent_sampled = data.get("sa") self._accept_distributed_trace_data(data, transport_type) self._record_supportability("Supportability/TraceContext/Accept/Success") return True elif distributed_header: - distributed_header = ensure_str(distributed_header) return self._accept_distributed_trace_payload(distributed_header, transport_type) + else: + # Do not return anything, but still generate supportability + # metric for the lack of payload/distributed_header + self._accept_distributed_trace_payload(distributed_header, transport_type) def _process_incoming_cat_headers(self, encoded_cross_process_id, encoded_txn_header): settings = self._settings @@ -1599,18 +1583,6 @@ def record_log_event(self, message, level=None, timestamp=None, attributes=None, self._log_events.add(event, priority=priority) - # This function has been deprecated 
(and will be removed eventually) - # and therefore does not need to be included in coverage analysis - def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_errors=None): # pragma: no cover - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) - def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None): settings = self._settings @@ -1788,28 +1760,6 @@ def add_custom_attributes(self, items): return result - # This function has been deprecated (and will be removed eventually) - # and therefore does not need to be included in coverage analysis - def add_custom_parameter(self, name, value): # pragma: no cover - # Deprecation warning - warnings.warn( - ("The add_custom_parameter API has been deprecated. Please use the add_custom_attribute API."), - DeprecationWarning, - stacklevel=2, - ) - return self.add_custom_attribute(name, value) - - # This function has been deprecated (and will be removed eventually) - # and therefore does not need to be included in coverage analysis - def add_custom_parameters(self, items): # pragma: no cover - # Deprecation warning - warnings.warn( - ("The add_custom_parameters API has been deprecated. 
Please use the add_custom_attributes API."), - DeprecationWarning, - stacklevel=2, - ) - return self.add_custom_attributes(items) - def add_framework_info(self, name, version=None): if name: self._frameworks.add((name, version)) @@ -1910,30 +1860,6 @@ def add_custom_attributes(items): return False -# This function has been deprecated (and will be removed eventually) -# and therefore does not need to be included in coverage analysis -def add_custom_parameter(key, value): # pragma: no cover - # Deprecation warning - warnings.warn( - ("The add_custom_parameter API has been deprecated. Please use the add_custom_attribute API."), - DeprecationWarning, - stacklevel=2, - ) - return add_custom_attribute(key, value) - - -# This function has been deprecated (and will be removed eventually) -# and therefore does not need to be included in coverage analysis -def add_custom_parameters(items): # pragma: no cover - # Deprecation warning - warnings.warn( - ("The add_custom_parameters API has been deprecated. Please use the add_custom_attributes API."), - DeprecationWarning, - stacklevel=2, - ) - return add_custom_attributes(items) - - def set_user_id(user_id): transaction = current_transaction() @@ -1962,15 +1888,6 @@ def get_browser_timing_header(nonce=None): return "" -def get_browser_timing_footer(nonce=None): - warnings.warn( - "The get_browser_timing_footer function is deprecated. 
Please migrate to only using the get_browser_timing_header API instead.", - DeprecationWarning, - stacklevel=2, - ) - return "" - - def disable_browser_autorum(flag=True): transaction = current_transaction() if transaction: @@ -2144,25 +2061,12 @@ def record_log_event(message, level=None, timestamp=None, attributes=None, appli application.record_log_event(message, level, timestamp, attributes=attributes, priority=priority) -def accept_distributed_trace_payload(payload, transport_type="HTTP"): - transaction = current_transaction() - if transaction: - return transaction.accept_distributed_trace_payload(payload, transport_type) - return False - - def accept_distributed_trace_headers(headers, transport_type="HTTP"): transaction = current_transaction() if transaction: return transaction.accept_distributed_trace_headers(headers, transport_type) -def create_distributed_trace_payload(): - transaction = current_transaction() - if transaction: - return transaction.create_distributed_trace_payload() - - def insert_distributed_trace_headers(headers): transaction = current_transaction() if transaction: diff --git a/newrelic/api/web_transaction.py b/newrelic/api/web_transaction.py index 4c2b575b96..c305663273 100644 --- a/newrelic/api/web_transaction.py +++ b/newrelic/api/web_transaction.py @@ -71,11 +71,27 @@ def _parse_time_stamp(time_stamp): TRUE_VALUES = {"on", "true", "1"} FALSE_VALUES = {"off", "false", "0"} +DEPRECATED_ENVIRON_SETTINGS = ( + "newrelic.set_background_task", + "newrelic.suppress_apdex_metric", + "newrelic.suppress_transaction_trace", + "newrelic.capture_request_params", + "newrelic.disable_browser_autorum", +) + def _lookup_environ_setting(environ, name, default=False): if name not in environ: return default + # Check for deprecated WSGI environ dictionary setting + if name in DEPRECATED_ENVIRON_SETTINGS: + warnings.warn( + f"Environ setting '{name}' is deprecated and will be removed in a future release.", + DeprecationWarning, + stacklevel=2, + ) + flag = 
environ[name] if isinstance(flag, str): diff --git a/newrelic/common/_monotonic.c b/newrelic/common/_monotonic.c deleted file mode 100644 index 95f28f1a05..0000000000 --- a/newrelic/common/_monotonic.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2010 New Relic, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * This file is a modified back port of the monotonic() function from - * Python 3.3. The original code was released under the Python Software - * Foundation License Version 2. 
- */ - -#include - -#include - -#if defined(__APPLE__) -#include -#include -#endif - -#ifndef PyVarObject_HEAD_INIT -#define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, -#endif - -/* ------------------------------------------------------------------------- */ - -static PyObject *monotonic(PyObject *self, PyObject *args) -{ -#if defined(MS_WINDOWS) - static ULONGLONG (*GetTickCount64) (void) = NULL; - static ULONGLONG (CALLBACK *Py_GetTickCount64)(void); - static int has_getickcount64 = -1; - double result; - - if (has_getickcount64 == -1) { - /* GetTickCount64() was added to Windows Vista */ - if (winver.dwMajorVersion >= 6) { - HINSTANCE hKernel32; - hKernel32 = GetModuleHandleW(L"KERNEL32"); - *(FARPROC*)&Py_GetTickCount64 = GetProcAddress(hKernel32, - "GetTickCount64"); - has_getickcount64 = (Py_GetTickCount64 != NULL); - } - else - has_getickcount64 = 0; - } - - if (has_getickcount64) { - ULONGLONG ticks; - ticks = Py_GetTickCount64(); - result = (double)ticks * 1e-3; - } - else { - static DWORD last_ticks = 0; - static DWORD n_overflow = 0; - DWORD ticks; - - ticks = GetTickCount(); - if (ticks < last_ticks) - n_overflow++; - last_ticks = ticks; - - result = ldexp(n_overflow, 32); - result += ticks; - result *= 1e-3; - } - - return PyFloat_FromDouble(result); - -#elif defined(__APPLE__) - static mach_timebase_info_data_t timebase; - uint64_t time; - double secs; - - if (timebase.denom == 0) { - /* According to the Technical Q&A QA1398, mach_timebase_info() cannot - fail: https://developer.apple.com/library/mac/#qa/qa1398/ */ - (void)mach_timebase_info(&timebase); - } - - time = mach_absolute_time(); - secs = (double)time * timebase.numer / timebase.denom * 1e-9; - - return PyFloat_FromDouble(secs); - -#elif (defined(CLOCK_HIGHRES) || defined(CLOCK_MONOTONIC)) - struct timespec tp; -#ifdef CLOCK_HIGHRES - const clockid_t clk_id = CLOCK_HIGHRES; -#else - const clockid_t clk_id = CLOCK_MONOTONIC; -#endif - - if (clock_gettime(clk_id, &tp) != 
0) { - PyErr_SetFromErrno(PyExc_OSError); - return NULL; - } - - return PyFloat_FromDouble(tp.tv_sec + tp.tv_nsec * 1e-9); -#else - PyErr_SetNone(PyExc_NotImplementedError); - return NULL; -#endif -} - -/* ------------------------------------------------------------------------- */ - -static PyMethodDef monotonic_methods[] = { - { "monotonic", (PyCFunction)monotonic, METH_NOARGS, 0 }, - { NULL, NULL } -}; - -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_monotonic", /* m_name */ - NULL, /* m_doc */ - -1, /* m_size */ - monotonic_methods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL, /* m_free */ -}; - -static PyObject * -moduleinit(void) -{ - PyObject *module; - - module = PyModule_Create(&moduledef); - - if (module == NULL) - return NULL; - - return module; -} - -PyMODINIT_FUNC PyInit__monotonic(void) -{ - return moduleinit(); -} - -/* ------------------------------------------------------------------------- */ - diff --git a/newrelic/common/encoding_utils.py b/newrelic/common/encoding_utils.py index 5508f4a7a9..6f7e9d199f 100644 --- a/newrelic/common/encoding_utils.py +++ b/newrelic/common/encoding_utils.py @@ -34,6 +34,7 @@ DELIMITER_FORMAT_RE = re.compile("[ \t]*,[ \t]*") PARENT_TYPE = {"0": "App", "1": "Browser", "2": "Mobile"} BASE64_DECODE_STR = getattr(base64, "decodestring", None) +FLAG_SAMPLED = 1 # Functions for encoding/decoding JSON. 
These wrappers are used in order @@ -455,7 +456,10 @@ def decode(cls, payload): if parent_id == "0" * 16 or trace_id == "0" * 32: return None - return cls(tr=trace_id, id=parent_id) + # Sampled flag + sa = bool(int(fields[3], 2) & FLAG_SAMPLED) + + return cls(tr=trace_id, id=parent_id, sa=sa) class W3CTraceState(OrderedDict): diff --git a/newrelic/common/object_wrapper.py b/newrelic/common/object_wrapper.py index dba775a381..be8c351f4e 100644 --- a/newrelic/common/object_wrapper.py +++ b/newrelic/common/object_wrapper.py @@ -20,7 +20,6 @@ """ import inspect -import warnings from newrelic.packages.wrapt import BoundFunctionWrapper as _BoundFunctionWrapper from newrelic.packages.wrapt import CallableObjectProxy as _CallableObjectProxy @@ -113,23 +112,6 @@ class CallableObjectProxy(ObjectProxy, _CallableObjectProxy): pass -# The ObjectWrapper class needs to be deprecated and removed once all our -# own code no longer uses it. It reaches down into what are wrapt internals -# at present which shouldn't be doing. - - -class ObjectWrapper(FunctionWrapper): - def __init__(self, wrapped, instance, wrapper): - warnings.warn( - ( - "The ObjectWrapper API is deprecated. Please use one of ObjectProxy, FunctionWrapper, or CallableObjectProxy instead." - ), - DeprecationWarning, - stacklevel=2, - ) - super().__init__(wrapped, wrapper) - - # Function for creating a decorator for applying to functions, as well as # short cut functions for applying wrapper functions via monkey patching. 
diff --git a/newrelic/common/package_version_utils.py b/newrelic/common/package_version_utils.py index 5b2886505c..da40f0dffa 100644 --- a/newrelic/common/package_version_utils.py +++ b/newrelic/common/package_version_utils.py @@ -69,7 +69,7 @@ def int_or_str(value): return version -@lru_cache() +@lru_cache def _get_package_version(name): module = sys.modules.get(name, None) version = None diff --git a/newrelic/common/stopwatch.py b/newrelic/common/stopwatch.py index 2305dda7e6..7c2edee4a8 100644 --- a/newrelic/common/stopwatch.py +++ b/newrelic/common/stopwatch.py @@ -20,36 +20,8 @@ import time -try: - # Python 3.3 and later implements PEP 418. Use the - # performance counter it provides which is monotonically - # increasing. - - default_timer = time.perf_counter - timer_implementation = "time.perf_counter()" - -except AttributeError: - try: - # Next try our own bundled back port of the monotonic() - # function. Python 3.3 does on Windows use a different - # clock for the performance counter, but the standard - # monotonic clock should suit our requirements okay. - - from newrelic.common._monotonic import monotonic as default_timer - - default_timer() - timer_implementation = "_monotonic.monotonic()" - - except (ImportError, NotImplementedError, OSError): - # If neither of the above, fallback to using the default - # timer from the timeit module. This will use the best - # resolution clock available on a particular platform, - # albeit that it isn't monotonically increasing. 
- - import timeit - - default_timer = timeit.default_timer - timer_implementation = "timeit.default_timer()" +default_timer = time.perf_counter +timer_implementation = "time.perf_counter()" # A timer class which deals with remembering the start time based on # wall clock time and duration based on a monotonic clock where diff --git a/newrelic/config.py b/newrelic/config.py index e9f5442e9f..3221b26438 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -20,6 +20,7 @@ import threading import time import traceback +from datetime import datetime, timezone from pathlib import Path import newrelic.api.application @@ -48,12 +49,14 @@ agent_control_health_instance, agent_control_healthcheck_loop, ) -from newrelic.core.config import Settings, apply_config_setting, default_host, fetch_config_setting +from newrelic.core.config import Settings, apply_config_setting, default_host __all__ = ["filter_app_factory", "initialize"] _logger = logging.getLogger(__name__) +DEPRECATED_MODULES = {"aioredis": datetime(2022, 2, 22, 0, 0, tzinfo=timezone.utc)} + def _map_aws_account_id(s): return newrelic.core.config._map_aws_account_id(s, _logger) @@ -331,7 +334,6 @@ def _process_configuration(section): _process_setting(section, "port", "getint", None) _process_setting(section, "otlp_host", "get", None) _process_setting(section, "otlp_port", "getint", None) - _process_setting(section, "ssl", "getboolean", None) _process_setting(section, "proxy_scheme", "get", None) _process_setting(section, "proxy_host", "get", None) _process_setting(section, "proxy_port", "getint", None) @@ -343,7 +345,6 @@ def _process_configuration(section): _process_setting(section, "developer_mode", "getboolean", None) _process_setting(section, "high_security", "getboolean", None) _process_setting(section, "capture_params", "getboolean", None) - _process_setting(section, "ignored_params", "get", _map_split_strings) _process_setting(section, "capture_environ", "getboolean", None) _process_setting(section, 
"include_environ", "get", _map_split_strings) _process_setting(section, "max_stack_trace_lines", "getint", None) @@ -374,7 +375,6 @@ def _process_configuration(section): _process_setting(section, "error_collector.capture_events", "getboolean", None) _process_setting(section, "error_collector.max_event_samples_stored", "getint", None) _process_setting(section, "error_collector.capture_source", "getboolean", None) - _process_setting(section, "error_collector.ignore_errors", "get", _map_split_strings) _process_setting(section, "error_collector.ignore_classes", "get", _map_split_strings) _process_setting(section, "error_collector.ignore_status_codes", "get", _merge_ignore_status_codes) _process_setting(section, "error_collector.expected_classes", "get", _map_split_strings) @@ -404,6 +404,8 @@ def _process_configuration(section): _process_setting(section, "ml_insights_events.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.exclude_newrelic_header", "getboolean", None) + _process_setting(section, "distributed_tracing.sampler.remote_parent_sampled", "get", None) + _process_setting(section, "distributed_tracing.sampler.remote_parent_not_sampled", "get", None) _process_setting(section, "span_events.enabled", "getboolean", None) _process_setting(section, "span_events.max_samples_stored", "getint", None) _process_setting(section, "span_events.attributes.enabled", "getboolean", None) @@ -421,7 +423,6 @@ def _process_configuration(section): _process_setting(section, "agent_limits.sql_explain_plans", "getint", None) _process_setting(section, "agent_limits.sql_explain_plans_per_harvest", "getint", None) _process_setting(section, "agent_limits.slow_sql_data", "getint", None) - _process_setting(section, "agent_limits.merge_stats_maximum", "getint", None) _process_setting(section, "agent_limits.errors_per_transaction", "getint", None) _process_setting(section, 
"agent_limits.errors_per_harvest", "getint", None) _process_setting(section, "agent_limits.slow_transaction_dry_harvests", "getint", None) @@ -673,12 +674,12 @@ def translate_deprecated_settings(settings, cached_settings): ("browser_monitoring.capture_attributes", "browser_monitoring.attributes.enabled"), ("analytics_events.capture_attributes", "transaction_events.attributes.enabled"), ("analytics_events.enabled", "transaction_events.enabled"), - ("analytics_events.max_samples_stored", "event_harvest_config.harvest_limits.analytic_event_data"), - ("transaction_events.max_samples_stored", "event_harvest_config.harvest_limits.analytic_event_data"), - ("span_events.max_samples_stored", "event_harvest_config.harvest_limits.span_event_data"), - ("error_collector.max_event_samples_stored", "event_harvest_config.harvest_limits.error_event_data"), - ("custom_insights_events.max_samples_stored", "event_harvest_config.harvest_limits.custom_event_data"), - ("application_logging.forwarding.max_samples_stored", "event_harvest_config.harvest_limits.log_event_data"), + ("analytics_events.max_samples_stored", "transaction_events.max_samples_stored"), + ("event_harvest_config.harvest_limits.analytic_event_data", "transaction_events.max_samples_stored"), + ("event_harvest_config.harvest_limits.span_event_data", "span_events.max_samples_stored"), + ("event_harvest_config.harvest_limits.error_event_data", "error_collector.max_event_samples_stored"), + ("event_harvest_config.harvest_limits.custom_event_data", "custom_insights_events.max_samples_stored"), + ("event_harvest_config.harvest_limits.log_event_data", "application_logging.forwarding.max_samples_stored"), ("error_collector.ignore_errors", "error_collector.ignore_classes"), ("strip_exception_messages.whitelist", "strip_exception_messages.allowlist"), ] @@ -695,41 +696,6 @@ def translate_deprecated_settings(settings, cached_settings): delete_setting(settings, old_key) - # The 'ignored_params' setting is more complicated than the 
above - # deprecated settings, so it gets handled separately. - - if "ignored_params" in cached: - _logger.info( - "Deprecated setting found: ignored_params. Please use " - "new setting: attributes.exclude. For the new setting, an " - "ignored parameter should be prefaced with " - '"request.parameters.". For example, ignoring a parameter ' - 'named "foo" should be added added to attributes.exclude as ' - '"request.parameters.foo."' - ) - - # Don't merge 'ignored_params' settings. If user set - # 'attributes.exclude' setting, only use those values, - # and ignore 'ignored_params' settings. - - if "attributes.exclude" in cached: - _logger.info("Ignoring deprecated setting: ignored_params. Using new setting: attributes.exclude.") - - else: - ignored_params = fetch_config_setting(settings, "ignored_params") - - for p in ignored_params: - attr_value = f"request.parameters.{p}" - excluded_attrs = fetch_config_setting(settings, "attributes.exclude") - - if attr_value not in excluded_attrs: - settings.attributes.exclude.append(attr_value) - _logger.info( - "Applying value of deprecated setting ignored_params to attributes.exclude: %r.", attr_value - ) - - delete_setting(settings, "ignored_params") - # The 'capture_params' setting is deprecated, but since it affects # attribute filter default destinations, it is not translated here. We # log a message, but keep the capture_params setting. @@ -754,17 +720,6 @@ def translate_deprecated_settings(settings, cached_settings): "https://docs.newrelic.com/docs/distributed-tracing/concepts/distributed-tracing-planning-guide/#changes." ) - if not settings.ssl: - settings.ssl = True - _logger.info("Ignoring deprecated setting: ssl. Enabling ssl is now mandatory. Setting ssl=true.") - - if settings.agent_limits.merge_stats_maximum is not None: - _logger.info( - "Ignoring deprecated setting: " - "agent_limits.merge_stats_maximum. The agent will now respect " - "server-side commands." 
- ) - return settings @@ -1146,6 +1101,18 @@ def _module_import_hook(target, module, function): def _instrument(target): _logger.debug("instrument module %s", ((target, module, function),)) + # Deprecation warning for archived/unsupported modules + library_name = target.__package__.split(".")[0] + + if library_name in DEPRECATED_MODULES: + _logger.warning( + "%(module)s has been archived by the developers " + "and has not been supported since %(date)s. %(module)s " + "support will be removed from New Relic in a future " + "release.", + {"module": library_name, "date": DEPRECATED_MODULES[library_name].strftime("%B %d, %Y")}, + ) + try: instrumented = target._nr_instrumented except AttributeError: @@ -2919,7 +2886,20 @@ def _process_module_builtin_defaults(): _process_module_definition("loguru", "newrelic.hooks.logger_loguru", "instrument_loguru") _process_module_definition("loguru._logger", "newrelic.hooks.logger_loguru", "instrument_loguru_logger") + _process_module_definition( + "autogen_ext.tools.mcp._base", "newrelic.hooks.mlmodel_autogen", "instrument_autogen_ext_tools_mcp__base" + ) + _process_module_definition( + "autogen_agentchat.agents._assistant_agent", + "newrelic.hooks.mlmodel_autogen", + "instrument_autogen_agentchat_agents__assistant_agent", + ) _process_module_definition("mcp.client.session", "newrelic.hooks.adapter_mcp", "instrument_mcp_client_session") + _process_module_definition( + "mcp.server.fastmcp.tools.tool_manager", + "newrelic.hooks.adapter_mcp", + "instrument_mcp_server_fastmcp_tools_tool_manager", + ) _process_module_definition("structlog._base", "newrelic.hooks.logger_structlog", "instrument_structlog__base") _process_module_definition("structlog._frames", "newrelic.hooks.logger_structlog", "instrument_structlog__frames") @@ -4153,6 +4133,12 @@ def _process_module_builtin_defaults(): "newrelic.hooks.framework_azurefunctions", "instrument_azure_functions_worker_dispatcher", ) + _process_module_definition( + "pyzeebe.client.client", 
"newrelic.hooks.external_pyzeebe", "instrument_pyzeebe_client_client" + ) + _process_module_definition( + "pyzeebe.worker.job_executor", "newrelic.hooks.external_pyzeebe", "instrument_pyzeebe_worker_job_executor" + ) def _process_module_entry_points(): diff --git a/newrelic/console.py b/newrelic/console.py index 2ed64dead9..0add2f38e6 100644 --- a/newrelic/console.py +++ b/newrelic/console.py @@ -354,7 +354,7 @@ def do_transactions(self): """ """ for item in _trace_cache.active_threads(): - transaction, thread_id, thread_type, frame = item + transaction, _thread_id, _thread_type, _frame = item print("THREAD", item, file=self.stdout) if transaction is not None: transaction.dump(self.stdout) @@ -460,7 +460,7 @@ def __thread_run(self): listener.listen(5) while True: - client, addr = listener.accept() + client, _addr = listener.accept() if not self.__console_initialized: self.__console_initialized = True diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index ab5cdc19bd..fbfc06b260 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -25,7 +25,6 @@ import threading import time import traceback -import warnings import newrelic import newrelic.core.application @@ -473,16 +472,6 @@ def remove_thread_utilization(self): _utilization_trackers.clear() - def record_exception(self, app_name, exc=None, value=None, tb=None, params=None, ignore_errors=None): - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. 
Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - self.notice_error(app_name, error=(exc, value, tb), attributes=params, ignore=ignore_errors) - def notice_error(self, app_name, error=None, attributes=None, expected=None, ignore=None, status_code=None): application = self._applications.get(app_name, None) if application is None or not application.active: diff --git a/newrelic/core/application.py b/newrelic/core/application.py index 43fdddc0ed..3ba8168d60 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -20,7 +20,6 @@ import threading import time import traceback -import warnings from functools import partial from newrelic.common.object_names import callable_name @@ -803,20 +802,6 @@ def remove_data_source(self, name): self._data_samplers.remove(data_sampler) - def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_errors=None): - """Record a global exception against the application independent - of a specific transaction. - - """ - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) - def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None): """Record a global exception against the application independent of a specific transaction. 
diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index 9cc14cfb29..79b9a56cb2 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -100,6 +100,12 @@ "response.headers.contentType", "response.status", "server.address", + "zeebe.client.bpmnProcessId", + "zeebe.client.messageName", + "zeebe.client.correlationKey", + "zeebe.client.messageId", + "zeebe.client.resourceCount", + "zeebe.client.resourceFile", } MAX_NUM_USER_ATTRIBUTES = 128 diff --git a/newrelic/core/attribute_filter.py b/newrelic/core/attribute_filter.py index 4eb67266bb..9c2be31373 100644 --- a/newrelic/core/attribute_filter.py +++ b/newrelic/core/attribute_filter.py @@ -209,6 +209,9 @@ def __ge__(self, other): def __repr__(self): return f"({self.name}, {bin(self.destinations)}, {self.is_wildcard}, {self.is_include})" + def __hash__(self): + return hash((self.name, self.destinations, self.is_include, self.is_wildcard)) + def name_match(self, name): if self.is_wildcard: return name.startswith(self.name) diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 47a0421b49..8375a618c8 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -333,6 +333,10 @@ class DistributedTracingSettings(Settings): pass +class DistributedTracingSamplerSettings(Settings): + pass + + class ServerlessModeSettings(Settings): pass @@ -502,6 +506,7 @@ class EventHarvestConfigHarvestLimitSettings(Settings): _settings.datastore_tracer.instance_reporting = DatastoreTracerInstanceReportingSettings() _settings.debug = DebugSettings() _settings.distributed_tracing = DistributedTracingSettings() +_settings.distributed_tracing.sampler = DistributedTracingSamplerSettings() _settings.error_collector = ErrorCollectorSettings() _settings.error_collector.attributes = ErrorCollectorAttributesSettings() _settings.event_harvest_config = EventHarvestConfigSettings() @@ -712,8 +717,6 @@ def default_otlp_host(host): _settings.license_key = os.environ.get("NEW_RELIC_LICENSE_KEY", 
None) _settings.api_key = os.environ.get("NEW_RELIC_API_KEY", None) -_settings.ssl = _environ_as_bool("NEW_RELIC_SSL", True) - _settings.host = os.environ.get("NEW_RELIC_HOST") _settings.otlp_host = os.environ.get("NEW_RELIC_OTLP_HOST") _settings.port = int(os.environ.get("NEW_RELIC_PORT", "0")) @@ -757,7 +760,6 @@ def default_otlp_host(host): _settings.web_transactions_apdex = {} _settings.capture_params = None -_settings.ignored_params = [] _settings.capture_environ = True _settings.include_environ = [ @@ -817,11 +819,17 @@ def default_otlp_host(host): ) _settings.transaction_events.enabled = True +_settings.transaction_events.max_samples_stored = _environ_as_int( + "NEW_RELIC_ANALYTICS_EVENTS_MAX_SAMPLES_STORED", default=DEFAULT_RESERVOIR_SIZE +) _settings.transaction_events.attributes.enabled = True _settings.transaction_events.attributes.exclude = [] _settings.transaction_events.attributes.include = [] _settings.custom_insights_events.enabled = True +_settings.custom_insights_events.max_samples_stored = _environ_as_int( + "NEW_RELIC_CUSTOM_INSIGHTS_EVENTS_MAX_SAMPLES_STORED", default=CUSTOM_EVENT_RESERVOIR_SIZE +) _settings.custom_insights_events.max_attribute_value = _environ_as_int( "NEW_RELIC_CUSTOM_INSIGHTS_EVENTS_MAX_ATTRIBUTE_VALUE", default=MAX_ATTRIBUTE_LENGTH ) @@ -829,8 +837,17 @@ def default_otlp_host(host): _settings.ml_insights_events.enabled = False _settings.distributed_tracing.enabled = _environ_as_bool("NEW_RELIC_DISTRIBUTED_TRACING_ENABLED", default=True) +_settings.distributed_tracing.sampler.remote_parent_sampled = os.environ.get( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_SAMPLED", "default" +) +_settings.distributed_tracing.sampler.remote_parent_not_sampled = os.environ.get( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_NOT_SAMPLED", "default" +) _settings.distributed_tracing.exclude_newrelic_header = False _settings.span_events.enabled = _environ_as_bool("NEW_RELIC_SPAN_EVENTS_ENABLED", default=True) 
+_settings.span_events.max_samples_stored = _environ_as_int( + "NEW_RELIC_SPAN_EVENTS_MAX_SAMPLES_STORED", default=SPAN_EVENT_RESERVOIR_SIZE +) _settings.span_events.attributes.enabled = True _settings.span_events.attributes.exclude = [] _settings.span_events.attributes.include = [] @@ -858,6 +875,9 @@ def default_otlp_host(host): _settings.error_collector.ignore_classes = [] _settings.error_collector.ignore_status_codes = _parse_status_codes("100-102 200-208 226 300-308 404", set()) _settings.error_collector.expected_classes = [] +_settings.error_collector.max_event_samples_stored = _environ_as_int( + "NEW_RELIC_ERROR_COLLECTOR_MAX_EVENT_SAMPLES_STORED", default=ERROR_EVENT_RESERVOIR_SIZE +) _settings.error_collector.expected_status_codes = set() _settings.error_collector._error_group_callback = None _settings.error_collector.attributes.enabled = True @@ -890,7 +910,6 @@ def default_otlp_host(host): _settings.agent_limits.sql_explain_plans = 30 _settings.agent_limits.sql_explain_plans_per_harvest = 60 _settings.agent_limits.slow_sql_data = 10 -_settings.agent_limits.merge_stats_maximum = None _settings.agent_limits.errors_per_transaction = 5 _settings.agent_limits.errors_per_harvest = 20 _settings.agent_limits.slow_transaction_dry_harvests = 5 @@ -1015,6 +1034,9 @@ def default_otlp_host(host): _settings.application_logging.forwarding.custom_attributes = _environ_as_mapping( "NEW_RELIC_APPLICATION_LOGGING_FORWARDING_CUSTOM_ATTRIBUTES", default="" ) +_settings.application_logging.forwarding.max_samples_stored = _environ_as_int( + "NEW_RELIC_APPLICATION_LOGGING_FORWARDING_MAX_SAMPLES_STORED", default=LOG_EVENT_RESERVOIR_SIZE +) _settings.application_logging.forwarding.labels.enabled = _environ_as_bool( "NEW_RELIC_APPLICATION_LOGGING_FORWARDING_LABELS_ENABLED", default=False @@ -1280,13 +1302,6 @@ def apply_server_side_settings(server_side_config=None, settings=_settings): if value == "apdex_f": agent_config["transaction_tracer.transaction_threshold"] = None - # If 
ignore_errors exists, and either ignore_classes is not set or it is empty - if "error_collector.ignore_errors" in agent_config and ( - "error_collector.ignore_classes" not in agent_config or not agent_config["error_collector.ignore_classes"] - ): - # Remap to newer config key - agent_config["error_collector.ignore_classes"] = agent_config.pop("error_collector.ignore_errors") - # Overlay with agent server side configuration settings. for name, value in agent_config.items(): @@ -1307,9 +1322,7 @@ def apply_server_side_settings(server_side_config=None, settings=_settings): span_event_harvest_config = server_side_config.get("span_event_harvest_config", {}) span_event_harvest_limit = span_event_harvest_config.get("harvest_limit", None) if span_event_harvest_limit is not None: - apply_config_setting( - settings_snapshot, "event_harvest_config.harvest_limits.span_event_data", span_event_harvest_limit - ) + apply_config_setting(settings_snapshot, "span_events.max_samples_stored", span_event_harvest_limit) # Check to see if collect_ai appears in the connect response to handle account-level AIM toggling collect_ai = server_side_config.get("collect_ai", None) diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index cc1bb42bee..5b566ac4cc 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -27,7 +27,6 @@ import sys import time import traceback -import warnings import zlib from heapq import heapify, heapreplace @@ -676,16 +675,6 @@ def record_time_metrics(self, metrics): for metric in metrics: self.record_time_metric(metric) - def record_exception(self, exc=None, value=None, tb=None, params=None, ignore_errors=None): - # Deprecation Warning - warnings.warn( - ("The record_exception function is deprecated. 
Please use the new api named notice_error instead."), - DeprecationWarning, - stacklevel=2, - ) - - self.notice_error(error=(exc, value, tb), attributes=params, ignore=ignore_errors) - def notice_error(self, error=None, attributes=None, expected=None, ignore=None, status_code=None): attributes = attributes if attributes is not None else {} settings = self.__settings @@ -714,7 +703,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, if getattr(value, "_nr_ignored", None): return - module, name, fullnames, message_raw = parse_exc_info(error) + _module, name, fullnames, message_raw = parse_exc_info(error) fullname = fullnames[0] # In the case case of JSON formatting for OpenAI models @@ -1715,21 +1704,19 @@ def reset_transaction_events(self): """ if self.__settings is not None: - self._transaction_events = SampledDataSet( - self.__settings.event_harvest_config.harvest_limits.analytic_event_data - ) + self._transaction_events = SampledDataSet(self.__settings.transaction_events.max_samples_stored) else: self._transaction_events = SampledDataSet() def reset_error_events(self): if self.__settings is not None: - self._error_events = SampledDataSet(self.__settings.event_harvest_config.harvest_limits.error_event_data) + self._error_events = SampledDataSet(self.__settings.error_collector.max_event_samples_stored) else: self._error_events = SampledDataSet() def reset_custom_events(self): if self.__settings is not None: - self._custom_events = SampledDataSet(self.__settings.event_harvest_config.harvest_limits.custom_event_data) + self._custom_events = SampledDataSet(self.__settings.custom_insights_events.max_samples_stored) else: self._custom_events = SampledDataSet() @@ -1741,13 +1728,13 @@ def reset_ml_events(self): def reset_span_events(self): if self.__settings is not None: - self._span_events = SampledDataSet(self.__settings.event_harvest_config.harvest_limits.span_event_data) + self._span_events = 
SampledDataSet(self.__settings.span_events.max_samples_stored) else: self._span_events = SampledDataSet() def reset_log_events(self): if self.__settings is not None: - self._log_events = SampledDataSet(self.__settings.event_harvest_config.harvest_limits.log_event_data) + self._log_events = SampledDataSet(self.__settings.application_logging.forwarding.max_samples_stored) else: self._log_events = SampledDataSet() diff --git a/newrelic/hooks/adapter_cheroot.py b/newrelic/hooks/adapter_cheroot.py index 44497d8f52..a1dbf6fec7 100644 --- a/newrelic/hooks/adapter_cheroot.py +++ b/newrelic/hooks/adapter_cheroot.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import newrelic.api.in_function import newrelic.api.wsgi_application +from newrelic.common.object_wrapper import wrap_in_function def instrument_cheroot_wsgiserver(module): @@ -22,4 +22,4 @@ def wrap_wsgi_application_entry_point(server, bind_addr, wsgi_app, *args, **kwar args = [server, bind_addr, application, *args] return (args, kwargs) - newrelic.api.in_function.wrap_in_function(module, "Server.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "Server.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_cherrypy.py b/newrelic/hooks/adapter_cherrypy.py index d30795e8f9..243156a5d1 100644 --- a/newrelic/hooks/adapter_cherrypy.py +++ b/newrelic/hooks/adapter_cherrypy.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import newrelic.api.in_function import newrelic.api.wsgi_application +from newrelic.common.object_wrapper import wrap_in_function def instrument_cherrypy_wsgiserver(module): @@ -22,4 +22,4 @@ def wrap_wsgi_application_entry_point(server, bind_addr, wsgi_app, *args, **kwar args = [server, bind_addr, application, *args] return (args, kwargs) - newrelic.api.in_function.wrap_in_function(module, "CherryPyWSGIServer.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "CherryPyWSGIServer.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_flup.py b/newrelic/hooks/adapter_flup.py index 93f30bf72b..b84e9883d9 100644 --- a/newrelic/hooks/adapter_flup.py +++ b/newrelic/hooks/adapter_flup.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import newrelic.api.in_function import newrelic.api.wsgi_application +from newrelic.common.object_wrapper import wrap_in_function def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): @@ -23,16 +23,16 @@ def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): def instrument_flup_server_cgi(module): - newrelic.api.in_function.wrap_in_function(module, "WSGIServer.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "WSGIServer.__init__", wrap_wsgi_application_entry_point) def instrument_flup_server_ajp_base(module): - newrelic.api.in_function.wrap_in_function(module, "BaseAJPServer.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "BaseAJPServer.__init__", wrap_wsgi_application_entry_point) def instrument_flup_server_fcgi_base(module): - newrelic.api.in_function.wrap_in_function(module, "BaseFCGIServer.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "BaseFCGIServer.__init__", wrap_wsgi_application_entry_point) def instrument_flup_server_scgi_base(module): - newrelic.api.in_function.wrap_in_function(module, 
"BaseSCGIServer.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "BaseSCGIServer.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_mcp.py b/newrelic/hooks/adapter_mcp.py index 355609b7aa..bcc8ae0a39 100644 --- a/newrelic/hooks/adapter_mcp.py +++ b/newrelic/hooks/adapter_mcp.py @@ -19,6 +19,7 @@ from newrelic.common.object_names import callable_name from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.common.signature import bind_args +from newrelic.core.config import global_settings _logger = logging.getLogger(__name__) @@ -28,6 +29,10 @@ async def wrap_call_tool(wrapped, instance, args, kwargs): if not transaction: return await wrapped(*args, **kwargs) + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + func_name = callable_name(wrapped) bound_args = bind_args(wrapped, args, kwargs) tool_name = bound_args.get("name") or "tool" @@ -42,6 +47,10 @@ async def wrap_read_resource(wrapped, instance, args, kwargs): if not transaction: return await wrapped(*args, **kwargs) + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + func_name = callable_name(wrapped) bound_args = bind_args(wrapped, args, kwargs) # Set a default value in case we can't parse out the URI scheme successfully @@ -64,6 +73,10 @@ async def wrap_get_prompt(wrapped, instance, args, kwargs): if not transaction: return await wrapped(*args, **kwargs) + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + func_name = callable_name(wrapped) bound_args = bind_args(wrapped, args, kwargs) prompt_name = bound_args.get("name") or "prompt" @@ -81,3 +94,9 @@ def instrument_mcp_client_session(module): wrap_function_wrapper(module, "ClientSession.read_resource", wrap_read_resource) if 
hasattr(module.ClientSession, "get_prompt"): wrap_function_wrapper(module, "ClientSession.get_prompt", wrap_get_prompt) + + +def instrument_mcp_server_fastmcp_tools_tool_manager(module): + if hasattr(module, "ToolManager"): + if hasattr(module.ToolManager, "call_tool"): + wrap_function_wrapper(module, "ToolManager.call_tool", wrap_call_tool) diff --git a/newrelic/hooks/adapter_meinheld.py b/newrelic/hooks/adapter_meinheld.py index 8e69a668e0..5bbf6de1c8 100644 --- a/newrelic/hooks/adapter_meinheld.py +++ b/newrelic/hooks/adapter_meinheld.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import newrelic.api.in_function import newrelic.api.wsgi_application +from newrelic.common.object_wrapper import wrap_in_function def instrument_meinheld_server(module): @@ -22,4 +22,4 @@ def wrap_wsgi_application_entry_point(application, *args, **kwargs): args = [application, *args] return (args, kwargs) - newrelic.api.in_function.wrap_in_function(module, "run", wrap_wsgi_application_entry_point) + wrap_in_function(module, "run", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_paste.py b/newrelic/hooks/adapter_paste.py index 0f1d38ca39..7a2fa9af58 100644 --- a/newrelic/hooks/adapter_paste.py +++ b/newrelic/hooks/adapter_paste.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import newrelic.api.in_function import newrelic.api.wsgi_application +from newrelic.common.object_wrapper import wrap_in_function def instrument_paste_httpserver(module): @@ -22,4 +22,4 @@ def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): args = [server, application, *args] return (args, kwargs) - newrelic.api.in_function.wrap_in_function(module, "WSGIServerBase.__init__", wrap_wsgi_application_entry_point) + wrap_in_function(module, "WSGIServerBase.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_waitress.py b/newrelic/hooks/adapter_waitress.py index e1a5e485f9..df251129ec 100644 --- a/newrelic/hooks/adapter_waitress.py +++ b/newrelic/hooks/adapter_waitress.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from newrelic.api.in_function import wrap_in_function from newrelic.api.wsgi_application import WSGIApplicationWrapper +from newrelic.common.object_wrapper import wrap_in_function from newrelic.common.package_version_utils import get_package_version diff --git a/newrelic/hooks/adapter_wsgiref.py b/newrelic/hooks/adapter_wsgiref.py index c368194adf..d788fdf738 100644 --- a/newrelic/hooks/adapter_wsgiref.py +++ b/newrelic/hooks/adapter_wsgiref.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import newrelic.api.in_function import newrelic.api.wsgi_application +from newrelic.common.object_wrapper import wrap_in_function def instrument_wsgiref_simple_server(module): @@ -22,4 +22,4 @@ def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): args = [server, application, *args] return (args, kwargs) - newrelic.api.in_function.wrap_in_function(module, "WSGIServer.set_app", wrap_wsgi_application_entry_point) + wrap_in_function(module, "WSGIServer.set_app", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/application_celery.py b/newrelic/hooks/application_celery.py index aa4e31c133..e5856141f0 100644 --- a/newrelic/hooks/application_celery.py +++ b/newrelic/hooks/application_celery.py @@ -26,9 +26,8 @@ from newrelic.api.background_task import BackgroundTask from newrelic.api.function_trace import FunctionTrace from newrelic.api.message_trace import MessageTrace -from newrelic.api.pre_function import wrap_pre_function from newrelic.api.transaction import current_transaction -from newrelic.common.object_wrapper import FunctionWrapper, wrap_function_wrapper +from newrelic.common.object_wrapper import FunctionWrapper, wrap_function_wrapper, wrap_pre_function from newrelic.common.signature import bind_args from newrelic.core.agent import shutdown_agent diff --git a/newrelic/hooks/component_piston.py b/newrelic/hooks/component_piston.py index f2abb2c760..4b805fa7ee 100644 --- a/newrelic/hooks/component_piston.py +++ b/newrelic/hooks/component_piston.py @@ -14,10 +14,10 @@ import newrelic.api.function_trace -import newrelic.api.in_function import newrelic.api.transaction import newrelic.common.object_wrapper from newrelic.common.object_names import callable_name +from newrelic.common.object_wrapper import wrap_in_function class MethodWrapper: @@ -82,4 +82,4 @@ def in_HandlerMethod_init(self, method, *args, **kwargs): method = method._nr_wrapped return ((self, method, *args), kwargs) - 
newrelic.api.in_function.wrap_in_function(module, "HandlerMethod.__init__", in_HandlerMethod_init) + wrap_in_function(module, "HandlerMethod.__init__", in_HandlerMethod_init) diff --git a/newrelic/hooks/coroutines_gevent.py b/newrelic/hooks/coroutines_gevent.py index 618c249aac..45380b48a6 100644 --- a/newrelic/hooks/coroutines_gevent.py +++ b/newrelic/hooks/coroutines_gevent.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from newrelic.api.post_function import wrap_post_function +from newrelic.common.object_wrapper import wrap_post_function def _patch_thread(threading=True, *args, **kwargs): diff --git a/newrelic/hooks/database_psycopg.py b/newrelic/hooks/database_psycopg.py index 48f205caf2..c028dbdd1e 100644 --- a/newrelic/hooks/database_psycopg.py +++ b/newrelic/hooks/database_psycopg.py @@ -418,7 +418,7 @@ def _add_defaults(parsed_host, parsed_hostaddr, parsed_port, parsed_database): def wrapper_psycopg_as_string(wrapped, instance, args, kwargs): - def _bind_params(context, *args, **kwargs): + def _bind_params(context=None, *args, **kwargs): return context, args, kwargs context, _args, _kwargs = _bind_params(*args, **kwargs) diff --git a/newrelic/hooks/datastore_elasticsearch.py b/newrelic/hooks/datastore_elasticsearch.py index 6f0ac69b5f..92867d1b83 100644 --- a/newrelic/hooks/datastore_elasticsearch.py +++ b/newrelic/hooks/datastore_elasticsearch.py @@ -16,6 +16,7 @@ from newrelic.api.transaction import current_transaction from newrelic.common.object_wrapper import function_wrapper, wrap_function_wrapper from newrelic.common.package_version_utils import get_package_version_tuple +from newrelic.core.config import global_settings # An index name can be a string, None or a sequence. In the case of None # an empty string or '*', it is the same as using '_all'. 
When a string @@ -139,7 +140,8 @@ def _nr_wrapper_Elasticsearch_method_(wrapped, instance, args, kwargs): with trace: result = wrapped(*args, **kwargs) - tracer_settings = trace.settings.datastore_tracer + settings = trace.settings or global_settings() + tracer_settings = settings.datastore_tracer if tracer_settings.instance_reporting.enabled: try: @@ -182,7 +184,8 @@ async def _nr_wrapper_AsyncElasticsearch_method_(wrapped, instance, args, kwargs with trace: result = await wrapped(*args, **kwargs) - tracer_settings = trace.settings.datastore_tracer + settings = trace.settings or global_settings() + tracer_settings = settings.datastore_tracer if tracer_settings.instance_reporting.enabled: try: @@ -267,7 +270,6 @@ async def _nr_wrapper_AsyncElasticsearch_method_(wrapped, instance, args, kwargs ("msearch_template", _extract_args_search_templates_index), ("mtermvectors", _extract_args_index), ("open_point_in_time", _extract_args_index), - ("options", None), ("ping", None), ("put_script", None), ("rank_eval", _extract_args_requests_index), @@ -783,7 +785,8 @@ def _nr_get_connection_wrapper(wrapped, instance, args, kwargs): host = port_path_or_id = "unknown" try: - tracer_settings = trace.settings.datastore_tracer + settings = trace.settings or global_settings() + tracer_settings = settings.datastore_tracer if tracer_settings.instance_reporting.enabled: host, port_path_or_id = conn._nr_host_port diff --git a/newrelic/hooks/datastore_memcache.py b/newrelic/hooks/datastore_memcache.py index 94f42ec44f..ca06438684 100644 --- a/newrelic/hooks/datastore_memcache.py +++ b/newrelic/hooks/datastore_memcache.py @@ -69,7 +69,7 @@ def _nr_datastore_trace_wrapper_(wrapped, instance, args, kwargs): result = wrapped(*args, **kwargs) instance_info = transaction._nr_datastore_instance_info - (host, port_path_or_id, db) = instance_info + (host, port_path_or_id, _db) = instance_info dt.host = host dt.port_path_or_id = port_path_or_id diff --git a/newrelic/hooks/external_aiobotocore.py 
b/newrelic/hooks/external_aiobotocore.py index 4cbaef3374..ddb9d4d056 100644 --- a/newrelic/hooks/external_aiobotocore.py +++ b/newrelic/hooks/external_aiobotocore.py @@ -11,9 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import logging -import sys -import traceback from io import BytesIO from aiobotocore.response import StreamingBody @@ -22,8 +21,10 @@ from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.hooks.external_botocore import ( EMBEDDING_STREAMING_UNSUPPORTED_LOG_MESSAGE, + REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE, RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, AsyncEventStreamWrapper, + extract_bedrock_converse_attrs, handle_bedrock_exception, run_bedrock_request_extractor, run_bedrock_response_extractor, @@ -97,23 +98,33 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): response_extractor = getattr(instance, "_nr_response_extractor", None) stream_extractor = getattr(instance, "_nr_stream_extractor", None) response_streaming = getattr(instance, "_nr_response_streaming", False) - + is_converse = getattr(instance, "_nr_is_converse", False) ft = getattr(instance, "_nr_ft", None) - if len(args) >= 2: - model = args[1].get("modelId") - request_body = args[1].get("body") - is_embedding = "embed" in model - else: - model = "" - request_body = None - is_embedding = False + try: + bedrock_args = args[1] if len(args) >= 2 else {} + model = bedrock_args.get("modelId") + is_embedding = "embed" in model if model else False + + request_body = {} if is_converse else bedrock_args.get("body") + except Exception: + _logger.warning(REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True) + model = None + is_embedding = False + request_body = None try: response = await wrapped(*args, **kwargs) except Exception as exc: handle_bedrock_exception( exc,
is_embedding, + model, + span_id, + trace_id, + request_extractor, + request_body, + ft, + transaction, + bedrock_args, + is_converse, ) raise @@ -132,36 +143,43 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): return response response_headers = response.get("ResponseMetadata", {}).get("HTTPHeaders") or {} - bedrock_attrs = { - "request_id": response_headers.get("x-amzn-requestid"), - "model": model, - "span_id": span_id, - "trace_id": trace_id, - } - - run_bedrock_request_extractor(request_extractor, request_body, bedrock_attrs) - try: - if response_streaming: - # Wrap EventStream object here to intercept __iter__ method instead of instrumenting class. - # This class is used in numerous other services in botocore, and would cause conflicts. - response["body"] = body = AsyncEventStreamWrapper(response["body"]) - body._nr_ft = ft or None - body._nr_bedrock_attrs = bedrock_attrs or {} - body._nr_model_extractor = stream_extractor or None - return response - - # Read and replace response streaming bodies - response_body = await response["body"].read() + if is_converse: + response_body = {} + bedrock_attrs = extract_bedrock_converse_attrs( + args[1], response, response_headers, model, span_id, trace_id + ) + else: + bedrock_attrs = { + "request_id": response_headers.get("x-amzn-requestid"), + "model": model, + "span_id": span_id, + "trace_id": trace_id, + } + # We only need to run the request extractor if invoke_model was called since the request formats are different + # across models + run_bedrock_request_extractor(request_extractor, request_body, bedrock_attrs) + + if response_streaming: + # Wrap EventStream object here to intercept __iter__ method instead of instrumenting class. + # This class is used in numerous other services in botocore, and would cause conflicts. 
+ response["body"] = body = AsyncEventStreamWrapper(response["body"]) + body._nr_ft = ft or None + body._nr_bedrock_attrs = bedrock_attrs or {} + body._nr_model_extractor = stream_extractor or None + return response + + # Read and replace response streaming bodies + response_body = await response["body"].read() + response["body"] = StreamingBody(AsyncBytesIO(response_body), len(response_body)) if ft: ft.__exit__(None, None, None) bedrock_attrs["duration"] = ft.duration * 1000 - response["body"] = StreamingBody(AsyncBytesIO(response_body), len(response_body)) run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction) except Exception: - _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) return response diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index a512a605ba..d8c18b49db 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -17,7 +17,6 @@ import logging import re import sys -import traceback import uuid from io import BytesIO @@ -41,10 +40,10 @@ _logger = logging.getLogger(__name__) -EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: While reporting an exception in botocore, another exception occurred. Report this issue to New Relic Support.\n%s" -REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to extract request information. Report this issue to New Relic Support.\n%s" -RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to extract response information. 
If the issue persists, report this issue to New Relic support.\n%s" -RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to report response data. Report this issue to New Relic Support.\n%s" +EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: While reporting an exception in botocore, another exception occurred. Report this issue to New Relic Support.\n" +REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to extract request information. Report this issue to New Relic Support.\n" +RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to extract response information. If the issue persists, report this issue to New Relic support.\n" +RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE = "Exception occurred in botocore instrumentation for AWS Bedrock: Failed to report response data. Report this issue to New Relic Support.\n" EMBEDDING_STREAMING_UNSUPPORTED_LOG_MESSAGE = "Response streaming with embedding models is unsupported in botocore instrumentation for AWS Bedrock. If this feature is now supported by AWS and botocore, report this issue to New Relic Support." 
UNSUPPORTED_MODEL_WARNING_SENT = False @@ -163,6 +162,8 @@ def extractor_string(*args, **kwargs): def bedrock_error_attributes(exception, bedrock_attrs): + # In some cases, such as a botocore.exceptions.ParamValidationError, the exception may not have a response attr + # We still want to record the error, so we add `error: True` to bedrock_attrs immediately response = getattr(exception, "response", None) if not response: return bedrock_attrs @@ -534,18 +535,33 @@ def extract_bedrock_cohere_model_streaming_response(response_body, bedrock_attrs def handle_bedrock_exception( - exc, is_embedding, model, span_id, trace_id, request_extractor, request_body, ft, transaction + exc, is_embedding, model, span_id, trace_id, request_extractor, request_body, ft, transaction, kwargs, is_converse ): try: bedrock_attrs = {"model": model, "span_id": span_id, "trace_id": trace_id} + if is_converse: + try: + input_message_list = [ + {"role": "user", "content": result["text"]} for result in kwargs["messages"][-1].get("content", []) + ] + if "system" in kwargs.keys(): + input_message_list.append({"role": "system", "content": kwargs.get("system")[0].get("text")}) + except Exception: + input_message_list = [] + + bedrock_attrs["input_message_list"] = input_message_list + bedrock_attrs["request.max_tokens"] = kwargs.get("inferenceConfig", {}).get("maxTokens", None) + bedrock_attrs["request.temperature"] = kwargs.get("inferenceConfig", {}).get("temperature", None) + try: request_extractor(request_body, bedrock_attrs) except json.decoder.JSONDecodeError: pass except Exception: - _logger.warning(REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True) error_attributes = bedrock_error_attributes(exc, bedrock_attrs) + notice_error_attributes = { "http.statusCode": error_attributes.get("http.statusCode"), "error.message": error_attributes.get("error.message"), @@ -568,7 +584,7 @@ def 
handle_bedrock_exception( else: handle_chat_completion_event(transaction, error_attributes) except Exception: - _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, exc_info=True) def run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction): @@ -576,7 +592,7 @@ def run_bedrock_response_extractor(response_extractor, response_body, bedrock_at try: response_extractor(response_body, bedrock_attrs) except Exception: - _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True) if is_embedding: handle_embedding_event(transaction, bedrock_attrs) @@ -590,7 +606,7 @@ def run_bedrock_request_extractor(request_extractor, request_body, bedrock_attrs except json.decoder.JSONDecodeError: pass except Exception: - _logger.warning(REQUEST_EXTACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(REQUEST_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True) def wrap_bedrock_runtime_invoke_model(response_streaming=False): @@ -669,7 +685,17 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: handle_bedrock_exception( - exc, is_embedding, model, span_id, trace_id, request_extractor, request_body, ft, transaction + exc, + is_embedding, + model, + span_id, + trace_id, + request_extractor, + request_body, + ft, + transaction, + kwargs, + is_converse=False, ) raise @@ -718,13 +744,124 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction) except Exception: - _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + 
_logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) return response return _wrap_bedrock_runtime_invoke_model +def wrap_bedrock_runtime_converse(response_streaming=False): + @function_wrapper + def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs): + # Wrapped function only takes keyword arguments, no need for binding + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + transaction.add_ml_model_info("Bedrock", BOTOCORE_VERSION) + transaction._add_agent_attribute("llm", True) + + model = kwargs.get("modelId") + if not model: + return wrapped(*args, **kwargs) + + # Extractors are not needed for Converse API since the request and response formats are consistent across models + request_extractor = response_extractor = stream_extractor = NULL_EXTRACTOR + + function_name = wrapped.__name__ + # Function trace may not be exited in this function in the case of streaming, so start manually + ft = FunctionTrace(name=function_name, group="Llm/completion/Bedrock") + ft.__enter__() + + # Get trace information + available_metadata = get_trace_linking_metadata() + span_id = available_metadata.get("span.id") + trace_id = available_metadata.get("trace.id") + + # Store data on instance to pass context to async instrumentation in aiobotocore + instance._nr_trace_id = trace_id + instance._nr_span_id = span_id + instance._nr_request_extractor = request_extractor + instance._nr_response_extractor = response_extractor + instance._nr_stream_extractor = stream_extractor + instance._nr_txn = transaction + instance._nr_ft = ft + instance._nr_response_streaming = response_streaming + instance._nr_settings = settings + instance._nr_is_converse = True + + # Add a bedrock flag to instance so we can determine when make_api_call instrumentation is hit from non-Bedrock paths and bypass it if so
+ instance._nr_is_bedrock = True + + try: + # For aioboto3 clients, this will call make_api_call instrumentation in external_aiobotocore + response = wrapped(*args, **kwargs) + except Exception as exc: + handle_bedrock_exception( + exc, False, model, span_id, trace_id, request_extractor, {}, ft, transaction, kwargs, is_converse=True + ) + raise + + if not response or (response_streaming and not settings.ai_monitoring.streaming.enabled): + ft.__exit__(None, None, None) + return response + + # Let the instrumentation of make_api_call in the aioboto3 client handle it if we have an async case + if inspect.iscoroutine(response): + return response + + response_headers = response.get("ResponseMetadata", {}).get("HTTPHeaders") or {} + bedrock_attrs = extract_bedrock_converse_attrs(kwargs, response, response_headers, model, span_id, trace_id) + + try: + ft.__exit__(None, None, None) + bedrock_attrs["duration"] = ft.duration * 1000 + run_bedrock_response_extractor(response_extractor, {}, bedrock_attrs, False, transaction) + + except Exception: + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) + + return response + + return _wrap_bedrock_runtime_converse + + +def extract_bedrock_converse_attrs(kwargs, response, response_headers, model, span_id, trace_id): + input_message_list = [] + # If a system message is supplied, it is under its own key in kwargs rather than with the other input messages + if "system" in kwargs.keys(): + input_message_list.extend({"role": "system", "content": result["text"]} for result in kwargs.get("system", [])) + + # kwargs["messages"] can hold multiple requests and responses to maintain conversation history + # We grab the last message (the newest request) in the list each time, so we don't duplicate recorded data + input_message_list.extend( + [{"role": "user", "content": result["text"]} for result in kwargs["messages"][-1].get("content", [])] + ) + + output_message_list = [ + {"role": "assistant", "content": result["text"]} + 
for result in response.get("output").get("message").get("content", []) + ] + + bedrock_attrs = { + "request_id": response_headers.get("x-amzn-requestid"), + "model": model, + "span_id": span_id, + "trace_id": trace_id, + "response.choices.finish_reason": response.get("stopReason"), + "output_message_list": output_message_list, + "request.max_tokens": kwargs.get("inferenceConfig", {}).get("maxTokens", None), + "request.temperature": kwargs.get("inferenceConfig", {}).get("temperature", None), + "input_message_list": input_message_list, + } + return bedrock_attrs + + class EventStreamWrapper(ObjectProxy): def __iter__(self): g = GeneratorProxy(self.__wrapped__.__iter__()) @@ -809,7 +946,7 @@ def record_stream_chunk(self, return_val, transaction): if _type == "content_block_stop": record_events_on_stop_iteration(self, transaction) except Exception: - _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True) def record_events_on_stop_iteration(self, transaction): @@ -825,7 +962,7 @@ def record_events_on_stop_iteration(self, transaction): bedrock_attrs["duration"] = self._nr_ft.duration * 1000 handle_chat_completion_event(transaction, bedrock_attrs) except Exception: - _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) # Clear cached data as this can be very large. self._nr_bedrock_attrs.clear() @@ -859,7 +996,7 @@ def record_error(self, transaction, exc): # Clear cached data as this can be very large. 
error_attributes.clear() except Exception: - _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) + _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, exc_info=True) def handle_embedding_event(transaction, bedrock_attrs): @@ -905,7 +1042,6 @@ def handle_embedding_event(transaction, bedrock_attrs): def handle_chat_completion_event(transaction, bedrock_attrs): chat_completion_id = str(uuid.uuid4()) - # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events custom_attrs_dict = transaction._custom_params llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} @@ -944,7 +1080,6 @@ def handle_chat_completion_event(transaction, bedrock_attrs): } chat_completion_summary_dict.update(llm_metadata_dict) chat_completion_summary_dict = {k: v for k, v in chat_completion_summary_dict.items() if v is not None} - transaction.record_custom_event("LlmChatCompletionSummary", chat_completion_summary_dict) create_chat_completion_message_event( @@ -1390,6 +1525,7 @@ def wrap_serialize_to_request(wrapped, instance, args, kwargs): ("bedrock-runtime", "invoke_model_with_response_stream"): wrap_bedrock_runtime_invoke_model( response_streaming=True ), + ("bedrock-runtime", "converse"): wrap_bedrock_runtime_converse(response_streaming=False), } @@ -1399,8 +1535,8 @@ def bind__create_api_method(py_operation_name, operation_name, service_model, *a def _nr_clientcreator__create_api_method_(wrapped, instance, args, kwargs): (py_operation_name, service_model) = bind__create_api_method(*args, **kwargs) - service_name = service_model.service_name.lower() + tracer = CUSTOM_TRACE_POINTS.get((service_name, py_operation_name)) wrapped = wrapped(*args, **kwargs) diff --git a/newrelic/hooks/external_pyzeebe.py b/newrelic/hooks/external_pyzeebe.py new file mode 100644 index 0000000000..6d976670af --- /dev/null +++ b/newrelic/hooks/external_pyzeebe.py @@ -0,0 +1,126 @@ +# 
Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging + +from newrelic.api.application import application_instance +from newrelic.api.function_trace import FunctionTrace +from newrelic.api.transaction import current_transaction +from newrelic.api.web_transaction import WebTransaction +from newrelic.common.object_wrapper import wrap_function_wrapper + +_logger = logging.getLogger(__name__) + +CLIENT_ATTRIBUTES_DEPLOY_RESOURCE_LOG_MSG = "Exception occurred in PyZeebe instrumentation: Failed to extract resource count/file for method `deploy_resource`. Report this issue to New Relic support." 
+ + +# Adds client method params as txn or span attributes +def _add_client_input_attributes(method_name, trace, args, kwargs): + bpmn_id = extract_agent_attribute_from_methods( + args, kwargs, method_name, ("run_process", "run_process_with_result"), "bpmn_process_id", 0 + ) + if bpmn_id: + trace._add_agent_attribute("zeebe.client.bpmnProcessId", bpmn_id) + + msg_name = extract_agent_attribute_from_methods(args, kwargs, method_name, ("publish_message"), "name", 0) + if msg_name: + trace._add_agent_attribute("zeebe.client.messageName", msg_name) + + correlation_key = extract_agent_attribute_from_methods( + args, kwargs, method_name, ("publish_message"), "correlation_key", 1 + ) + if correlation_key: + trace._add_agent_attribute("zeebe.client.correlationKey", correlation_key) + + message_id = extract_agent_attribute_from_methods(args, kwargs, method_name, ("publish_message"), "message_id", 4) + if message_id: + trace._add_agent_attribute("zeebe.client.messageId", message_id) + + resource = extract_agent_attribute_from_methods(args, {}, method_name, ("deploy_resource"), None, 0) + if resource: + try: + trace._add_agent_attribute("zeebe.client.resourceFile", resource) + trace._add_agent_attribute("zeebe.client.resourceCount", len(list(args))) + except Exception: + _logger.warning(CLIENT_ATTRIBUTES_DEPLOY_RESOURCE_LOG_MSG, exc_info=True) + + +def extract_agent_attribute_from_methods(args, kwargs, method_name, methods, param, index): + try: + if method_name in methods: + value = kwargs.get(param) + if not value and args and len(args) > index: + value = args[index] + return value + except Exception: + _logger.warning( + "Exception occurred in PyZeebe instrumentation: failed to extract %s from %s. 
Report this issue to New Relic support.", + param, + method_name, + exc_info=True, + ) + + +# Async wrapper that instruments router/worker annotations` +async def _nr_wrapper_execute_one_job(wrapped, instance, args, kwargs): + job = args[0] if args else kwargs.get("job") + process_id = getattr(job, "bpmn_process_id", None) or "UnknownProcess" + task_type = getattr(job, "type", None) or "UnknownType" + txn_name = f"{process_id}/{task_type}" + + with WebTransaction(application_instance(), txn_name, group="ZeebeTask") as txn: + if job is not None: + if hasattr(job, "key"): + txn.add_custom_attribute("zeebe.job.key", job.key) + if hasattr(job, "type"): + txn.add_custom_attribute("zeebe.job.type", job.type) + if hasattr(job, "bpmn_process_id"): + txn.add_custom_attribute("zeebe.job.bpmnProcessId", job.bpmn_process_id) + if hasattr(job, "process_instance_key"): + txn.add_custom_attribute("zeebe.job.processInstanceKey", job.process_instance_key) + if hasattr(job, "element_id"): + txn.add_custom_attribute("zeebe.job.elementId", job.element_id) + + return await wrapped(*args, **kwargs) + + +# Async wrapper that instruments a ZeebeClient method. +def _nr_client_wrapper(method_name): + async def _client_wrapper(wrapped, instance, args, kwargs): + txn = current_transaction() + if not txn: + return await wrapped(*args, **kwargs) + + with FunctionTrace(name=method_name, group="ZeebeClient") as trace: + _add_client_input_attributes(method_name, trace, args, kwargs) + return await wrapped(*args, **kwargs) + + return _client_wrapper + + +# Instrument JobExecutor.execute_one_job to create a background transaction per job (invoked from @router.task or @worker.task annotations) +def instrument_pyzeebe_worker_job_executor(module): + if hasattr(module, "JobExecutor"): + wrap_function_wrapper(module, "JobExecutor.execute_one_job", _nr_wrapper_execute_one_job) + + +# Instrument ZeebeClient methods to trace client calls. 
+def instrument_pyzeebe_client_client(module): + target_methods = ("run_process", "run_process_with_result", "deploy_resource", "publish_message") + + for method_name in target_methods: + if hasattr(module, "ZeebeClient"): + if hasattr(module.ZeebeClient, method_name): + wrap_function_wrapper(module, f"ZeebeClient.{method_name}", _nr_client_wrapper(method_name)) diff --git a/newrelic/hooks/framework_tornado.py b/newrelic/hooks/framework_tornado.py index c80a45a585..636be31250 100644 --- a/newrelic/hooks/framework_tornado.py +++ b/newrelic/hooks/framework_tornado.py @@ -91,7 +91,7 @@ def _wrap_headers_received(wrapped, instance, args, kwargs): except: pass - path, sep, query = start_line.path.partition("?") + path, _sep, query = start_line.path.partition("?") transaction = WebTransaction( application=application_instance(), diff --git a/newrelic/hooks/framework_webpy.py b/newrelic/hooks/framework_webpy.py index b8e28e5829..67bf7e33c3 100644 --- a/newrelic/hooks/framework_webpy.py +++ b/newrelic/hooks/framework_webpy.py @@ -12,15 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import newrelic.api.function_trace -import newrelic.api.in_function -import newrelic.api.out_function -import newrelic.api.pre_function import newrelic.api.transaction from newrelic.api.time_trace import notice_error from newrelic.api.wsgi_application import WSGIApplicationWrapper from newrelic.common.object_names import callable_name +from newrelic.common.object_wrapper import wrap_in_function, wrap_out_function, wrap_pre_function def transaction_name_delegate(*args, **kwargs): @@ -46,9 +43,9 @@ def template_name(render_obj, name): def instrument(module): if module.__name__ == "web.application": - newrelic.api.out_function.wrap_out_function(module, "application.wsgifunc", WSGIApplicationWrapper) - newrelic.api.in_function.wrap_in_function(module, "application._delegate", transaction_name_delegate) - newrelic.api.pre_function.wrap_pre_function(module, "application.internalerror", wrap_handle_exception) + wrap_out_function(module, "application.wsgifunc", WSGIApplicationWrapper) + wrap_in_function(module, "application._delegate", transaction_name_delegate) + wrap_pre_function(module, "application.internalerror", wrap_handle_exception) elif module.__name__ == "web.template": newrelic.api.function_trace.wrap_function_trace(module, "render.__getattr__", template_name, "Template/Render") diff --git a/newrelic/hooks/mlmodel_autogen.py b/newrelic/hooks/mlmodel_autogen.py new file mode 100644 index 0000000000..87d94a4c44 --- /dev/null +++ b/newrelic/hooks/mlmodel_autogen.py @@ -0,0 +1,224 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +import sys +import uuid + +from newrelic.api.function_trace import FunctionTrace +from newrelic.api.time_trace import get_trace_linking_metadata +from newrelic.api.transaction import current_transaction +from newrelic.common.object_names import callable_name +from newrelic.common.object_wrapper import wrap_function_wrapper +from newrelic.common.package_version_utils import get_package_version +from newrelic.common.signature import bind_args +from newrelic.core.config import global_settings + +# Check for the presence of the autogen-core, autogen-agentchat, or autogen-ext package as they should all have the +# same version and one or multiple could be installed +AUTOGEN_VERSION = ( + get_package_version("autogen-core") + or get_package_version("autogen-agentchat") + or get_package_version("autogen-ext") +) + + +RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in Autogen instrumentation: Failed to record LLM events. 
Please report this issue to New Relic Support.\n%s" + + +_logger = logging.getLogger(__name__) + + +async def wrap_from_server_params(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + func_name = callable_name(wrapped) + bound_args = bind_args(wrapped, args, kwargs) + tool_name = bound_args.get("tool_name") or "tool" + function_trace_name = f"{func_name}/{tool_name}" + with FunctionTrace(name=function_trace_name, group="Llm", source=wrapped): + return await wrapped(*args, **kwargs) + + +def wrap_on_messages_stream(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION) + transaction._add_agent_attribute("llm", True) + + agent_name = getattr(instance, "name", "agent") + agent_id = str(uuid.uuid4()) + agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction) + func_name = callable_name(wrapped) + function_trace_name = f"{func_name}/{agent_name}" + + ft = FunctionTrace(name=function_trace_name, group="Llm/agent/Autogen") + ft.__enter__() + + try: + return_val = wrapped(*args, **kwargs) + except Exception: + ft.notice_error(attributes={"agent_id": agent_id}) + ft.__exit__(*sys.exc_info()) + # If we hit an exception, append the error attribute and duration from the exited function trace + agent_event_dict.update({"duration": ft.duration * 1000, "error": True}) + transaction.record_custom_event("LlmAgent", agent_event_dict) + raise + + ft.__exit__(None, None, None) + agent_event_dict.update({"duration": ft.duration * 1000}) + + transaction.record_custom_event("LlmAgent", agent_event_dict) + + return return_val + + +def 
_get_llm_metadata(transaction): + # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events + custom_attrs_dict = transaction._custom_params + llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")} + llm_context_attrs = getattr(transaction, "_llm_context_attrs", None) + if llm_context_attrs: + llm_metadata_dict.update(llm_context_attrs) + + return llm_metadata_dict + + +def _extract_tool_output(return_val, tool_name): + try: + output = getattr(return_val[1], "content", None) + return output + except Exception: + _logger.warning("Unable to parse tool output value from %s. Omitting output from LlmTool event.", tool_name) + return None + + +def _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings): + try: + _input = getattr(tool_call_data, "arguments", None) + tool_input = str(_input) if _input else None + run_id = getattr(tool_call_data, "id", None) + tool_name = getattr(tool_call_data, "name", "tool") + agent_name = bound_args.get("agent_name") + linking_metadata = get_trace_linking_metadata() + + tool_event_dict = { + "id": tool_id, + "run_id": run_id, + "name": tool_name, + "span_id": linking_metadata.get("span.id"), + "trace_id": linking_metadata.get("trace.id"), + "agent_name": agent_name, + "vendor": "autogen", + "ingest_source": "Python", + } + if settings.ai_monitoring.record_content.enabled: + tool_event_dict.update({"input": tool_input}) + tool_event_dict.update(_get_llm_metadata(transaction)) + except Exception: + tool_event_dict = {} + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True) + + return tool_event_dict + + +def _construct_base_agent_event_dict(agent_name, agent_id, transaction): + try: + linking_metadata = get_trace_linking_metadata() + + agent_event_dict = { + "id": agent_id, + "name": agent_name, + "span_id": linking_metadata.get("span.id"), + "trace_id": linking_metadata.get("trace.id"), + "vendor": 
"autogen", + "ingest_source": "Python", + } + agent_event_dict.update(_get_llm_metadata(transaction)) + except Exception: + agent_event_dict = {} + _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True) + + return agent_event_dict + + +async def wrap__execute_tool_call(wrapped, instance, args, kwargs): + transaction = current_transaction() + if not transaction: + return await wrapped(*args, **kwargs) + + settings = transaction.settings or global_settings() + if not settings.ai_monitoring.enabled: + return await wrapped(*args, **kwargs) + + # Framework metric also used for entity tagging in the UI + transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION) + transaction._add_agent_attribute("llm", True) + + tool_id = str(uuid.uuid4()) + bound_args = bind_args(wrapped, args, kwargs) + tool_call_data = bound_args.get("tool_call") + tool_event_dict = _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings) + + tool_name = getattr(tool_call_data, "name", "tool") + + func_name = callable_name(wrapped) + ft = FunctionTrace(name=f"{func_name}/{tool_name}", group="Llm/tool/Autogen") + ft.__enter__() + + try: + return_val = await wrapped(*args, **kwargs) + except Exception: + ft.notice_error(attributes={"tool_id": tool_id}) + ft.__exit__(*sys.exc_info()) + # If we hit an exception, append the error attribute and duration from the exited function trace + tool_event_dict.update({"duration": ft.duration * 1000, "error": True}) + transaction.record_custom_event("LlmTool", tool_event_dict) + raise + + ft.__exit__(None, None, None) + tool_event_dict.update({"duration": ft.duration * 1000}) + + # If the tool was executed successfully, we can grab the tool output from the result + tool_output = _extract_tool_output(return_val, tool_name) + if settings.ai_monitoring.record_content.enabled: + tool_event_dict.update({"output": tool_output}) + + transaction.record_custom_event("LlmTool", tool_event_dict) + + return return_val + + +def 
instrument_autogen_agentchat_agents__assistant_agent(module): + if hasattr(module, "AssistantAgent"): + if hasattr(module.AssistantAgent, "on_messages_stream"): + wrap_function_wrapper(module, "AssistantAgent.on_messages_stream", wrap_on_messages_stream) + if hasattr(module.AssistantAgent, "_execute_tool_call"): + wrap_function_wrapper(module, "AssistantAgent._execute_tool_call", wrap__execute_tool_call) + + +def instrument_autogen_ext_tools_mcp__base(module): + if hasattr(module, "McpToolAdapter"): + if hasattr(module.McpToolAdapter, "from_server_params"): + wrap_function_wrapper(module, "McpToolAdapter.from_server_params", wrap_from_server_params) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index e7a9170946..c3f7960b6e 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -442,6 +442,7 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa # The function trace will be exited when in the final iteration of the response # generator. 
return_val._nr_ft = ft + return_val._nr_metadata = linking_metadata return_val._nr_openai_attrs = getattr(return_val, "_nr_openai_attrs", {}) return_val._nr_openai_attrs["messages"] = kwargs.get("messages", []) return_val._nr_openai_attrs["temperature"] = kwargs.get("temperature") @@ -488,14 +489,20 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa choices[0].get("message") or {"content": choices[0].get("text"), "role": "assistant"} ] finish_reason = choices[0].get("finish_reason") + if "tool_calls" in output_message_list[0] and not output_message_list[0].get("content"): + output_message_list = [] else: response_model = kwargs.get("response.model") response_id = kwargs.get("id") output_message_list = [] - finish_reason = None + finish_reason = kwargs.get("finish_reason") if "content" in kwargs: output_message_list = [{"content": kwargs.get("content"), "role": kwargs.get("role")}] - finish_reason = kwargs.get("finish_reason") + # When tools are involved, the content key may hold an empty string which we do not want to report + # In this case, the content we are interested in capturing will already be covered in the input_message_list + # We empty out the output_message_list so that we do not report an empty message + if "tool_call" in finish_reason and not kwargs.get("content"): + output_message_list = [] request_model = kwargs.get("model") or kwargs.get("engine") request_id = response_headers.get("x-request-id") @@ -765,7 +772,10 @@ def _record_stream_chunk(self, return_val): def _record_events_on_stop_iteration(self, transaction): if hasattr(self, "_nr_ft"): - linking_metadata = get_trace_linking_metadata() + # We first check for our saved linking metadata before making a new call to get_trace_linking_metadata + # Directly calling get_trace_linking_metadata() causes the incorrect span ID to be captured and associated with the LLM call + # This leads to incorrect linking of the LLM call in the UI + linking_metadata = 
self._nr_metadata or get_trace_linking_metadata() self._nr_ft.__exit__(None, None, None) try: openai_attrs = getattr(self, "_nr_openai_attrs", {}) @@ -872,6 +882,8 @@ def set_attrs_on_generator_proxy(proxy, instance): proxy._nr_response_headers = instance._nr_response_headers if hasattr(instance, "_nr_openai_attrs"): proxy._nr_openai_attrs = instance._nr_openai_attrs + if hasattr(instance, "_nr_metadata"): + proxy._nr_metadata = instance._nr_metadata def wrap_engine_api_resource_create_sync(wrapped, instance, args, kwargs): diff --git a/newrelic/hooks/mlmodel_sklearn.py b/newrelic/hooks/mlmodel_sklearn.py index 8e0207a2db..3fa2e7a1b7 100644 --- a/newrelic/hooks/mlmodel_sklearn.py +++ b/newrelic/hooks/mlmodel_sklearn.py @@ -284,7 +284,7 @@ def create_prediction_event(transaction, class_, instance, args, kwargs, return_ "modelName": model_name, }, ) - features, predictions = np_casted_data_set.shape + _features, _predictions = np_casted_data_set.shape for prediction_index, prediction in enumerate(np_casted_data_set): inference_id = uuid.uuid4() @@ -346,7 +346,7 @@ def wrap_metric_scorer(wrapped, instance, args, kwargs): score = wrapped(*args, **kwargs) - y_true, y_pred, args, kwargs = _bind_scorer(*args, **kwargs) + _y_true, y_pred, args, kwargs = _bind_scorer(*args, **kwargs) model_name = "Unknown" training_step = "Unknown" if hasattr(y_pred, "_nr_model_name"): diff --git a/pyproject.toml b/pyproject.toml index 2c449f8651..337aaae634 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,14 +12,102 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# =========================== +# Project Build Configuration +# =========================== + +[project] +name="newrelic" +description = "New Relic Python Agent" +authors = [{name = "New Relic", email = "support@newrelic.com"}] +maintainers = [{name = "New Relic", email = "support@newrelic.com"}] +readme = "README.md" +# License requires setuptools>=77.0.3 for pyproject.toml, which is Python 3.9+ +# license = "Apache-2.0" +# license-files = [ +# "LICENSE", +# "THIRD_PARTY_NOTICES.md", +# ] +requires-python = ">=3.8" # python_requires is also located in setup.py +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Topic :: System :: Monitoring", +] +dynamic = ["version", "license"] + +[project.urls] +Homepage = "https://docs.newrelic.com/docs/apm/agents/python-agent/" +GitHub = "https://github.com/newrelic/newrelic-python-agent" +"Release Notes" = "https://docs.newrelic.com/docs/release-notes/agent-release-notes/python-release-notes/" + +[project.optional-dependencies] +infinite-tracing = ["grpcio", "protobuf"] + +[project.scripts] +newrelic-admin = "newrelic.admin:main" + +[build-system] +requires = [ + "setuptools>=61.2", + "setuptools_scm>=6.4,<10", +] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +zip-safe = false +packages = [ + "newrelic", + "newrelic.admin", + "newrelic.api", + "newrelic.bootstrap", + "newrelic.common", + "newrelic.core", + "newrelic.extras", + "newrelic.extras.framework_django", + "newrelic.extras.framework_django.templatetags", + "newrelic.hooks", + "newrelic.network", + "newrelic.packages", + "newrelic.packages.isort", + 
"newrelic.packages.isort.stdlibs", + "newrelic.packages.urllib3", + "newrelic.packages.urllib3.util", + "newrelic.packages.urllib3.contrib", + "newrelic.packages.urllib3.contrib._securetransport", + "newrelic.packages.urllib3.packages", + "newrelic.packages.urllib3.packages.backports", + "newrelic.packages.wrapt", + "newrelic.packages.opentelemetry_proto", + "newrelic.samplers", +] + +[tool.setuptools.package-data] +newrelic = ["newrelic.ini", "version.txt", "packages/urllib3/LICENSE.txt", "common/cacert.pem", "scripts/azure-prebuild.sh"] + +[tool.setuptools_scm] +write_to = "newrelic/_version.py" +# Don't convert git_describe_command to an array, it doesn't seem to work properly on any versions of setuptools_scm. +git_describe_command = 'git describe --dirty --tags --long --match "*.*.*"' + +# ================== +# Ruff Configuration +# ================== + [tool.ruff] output-format = "grouped" line-length = 120 -target-version = "py37" +target-version = "py38" force-exclude = true # Fixes issue with megalinter config preventing exclusion of files extend-exclude = [ "newrelic/packages/", - "setup.py", "newrelic/core/infinite_tracing_*_pb2.py", ] namespace-packages = ["testing_support"] @@ -111,7 +199,7 @@ ignore = [ "PT012", # pytest-raises-with-multiple-statements (too many to fix all at once) # Permanently disabled rules "PLC0415", # import-outside-top-level (intentionally used frequently) - "UP006", # non-pep585-annotation (not compatible with Python 3.7 or 3.8) + "UP006", # non-pep585-annotation (not compatible with Python 3.8) "D203", # incorrect-blank-line-before-class "D213", # multi-line-summary-second-line "ARG001", # unused-argument @@ -135,6 +223,13 @@ ignore = [ ] [tool.ruff.lint.per-file-ignores] +"setup.py" = [ + # Disabled rules in setup.py + # setup.py needs to not immediately crash on Python 2 to log error messages + "UP032", # f-string (Python 3+ syntax) + "B904", # raise-without-from-inside-except (Python 3+ syntax) + "E402", # 
module-import-not-at-top-of-file (intentional) +] "tests/*" = [ # Disabled rules in tests "S", # flake8-bandit (security checks are not necessary in tests) @@ -159,61 +254,9 @@ ignore = [ "S108", # flake8-bandit (hardcoded log files are never used as input) ] -# Alternate linters and formatters -[tool.black] -line-length = 120 -include = '\.pyi?$' - -[tool.isort] -profile = "black" - -[tool.pylint.messages_control] -disable = [ - "C0103", - "C0114", - "C0115", - "C0116", - "C0209", - "C0302", - "C0415", - "E0401", - "E1120", - "R0205", - "R0401", - "R0801", - "R0902", - "R0903", - "R0904", - "R0911", - "R0912", - "R0913", - "R0914", - "R0915", - "R1705", - "R1710", - "R1725", - "W0201", - "W0212", - "W0223", - "W0402", - "W0603", - "W0612", - "W0613", - "W0702", - "W0703", - "W0706", - "line-too-long", - "redefined-outer-name", -] - -[tool.pylint.format] -max-line-length = "120" - -[tool.pylint.basic] -good-names = "exc,val,tb" - -[tool.bandit] -skips = ["B110", "B101", "B404"] +# ========================= +# Other Tools Configuration +# ========================= [tool.flynt] line-length = 999999 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 8a41f1534d..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,8 +0,0 @@ -[metadata] -license_files = - LICENSE - THIRD_PARTY_NOTICES.md - -[flake8] -max-line-length = 120 -extend-ignore = E122,E126,E127,E128,E203,E501,E722,F841,W504,E731,F811 diff --git a/setup.py b/setup.py index bff6b8b268..81a40ae967 100644 --- a/setup.py +++ b/setup.py @@ -15,15 +15,13 @@ import os import sys -from pathlib import Path - python_version = sys.version_info[:2] -if python_version >= (3, 7): +if python_version >= (3, 8): pass else: error_msg = ( - "The New Relic Python agent only supports Python 3.7+. We recommend upgrading to a newer version of Python." + "The New Relic Python agent only supports Python 3.8+. We recommend upgrading to a newer version of Python." 
) try: @@ -35,14 +33,14 @@ (3, 4): "4.20.0.120", (3, 5): "5.24.0.153", (3, 6): "7.16.0.178", + (3, 7): "10.17.0", } last_supported_version = last_supported_version_lookup.get(python_version, None) if last_supported_version: - python_version_str = "%s.%s" % (python_version[0], python_version[1]) - error_msg += " The last agent version to support Python %s was v%s." % ( - python_version_str, - last_supported_version, + python_version_str = "{}.{}".format(python_version[0], python_version[1]) + error_msg += " The last agent version to support Python {} was v{}.".format( + python_version_str, last_supported_version ) except Exception: pass @@ -62,33 +60,7 @@ from distutils.command.build_ext import build_ext from distutils.core import Extension from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError - - -def newrelic_agent_guess_next_version(tag_version): - if hasattr(tag_version, "tag"): # For setuptools_scm 7.0+ - tag_version = tag_version.tag - - version, _, _ = str(tag_version).partition("+") - version_info = list(map(int, version.split("."))) - if len(version_info) < 3: - return version - version_info[1] += 1 - version_info[2] = 0 - return ".".join(map(str, version_info)) - - -def newrelic_agent_next_version(version): - if version.exact: - return version.format_with("{tag}") - else: - return version.format_next_version(newrelic_agent_guess_next_version, fmt="{guessed}") - - -script_directory = Path(__file__).parent - -readme_file = script_directory / "README.md" -with readme_file.open() as f: - readme_file_contents = f.read() +from pathlib import Path build_ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, OSError) @@ -102,101 +74,74 @@ def run(self): try: build_ext.run(self) except DistutilsPlatformError: - raise BuildExtFailed() + raise BuildExtFailed def build_extension(self, ext): try: build_ext.build_extension(self, ext) except build_ext_errors: - raise BuildExtFailed() - - -packages = [ - "newrelic", - 
"newrelic.admin", - "newrelic.api", - "newrelic.bootstrap", - "newrelic.common", - "newrelic.core", - "newrelic.extras", - "newrelic.extras.framework_django", - "newrelic.extras.framework_django.templatetags", - "newrelic.hooks", - "newrelic.network", - "newrelic/packages", - "newrelic/packages/isort", - "newrelic/packages/isort/stdlibs", - "newrelic/packages/urllib3", - "newrelic/packages/urllib3/util", - "newrelic/packages/urllib3/contrib", - "newrelic/packages/urllib3/contrib/_securetransport", - "newrelic/packages/urllib3/packages", - "newrelic/packages/urllib3/packages/backports", - "newrelic/packages/wrapt", - "newrelic/packages/opentelemetry_proto", - "newrelic.samplers", -] - -classifiers = [ - "Development Status :: 5 - Production/Stable", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: System :: Monitoring", -] - -kwargs = dict( - name="newrelic", - use_scm_version={ - "version_scheme": newrelic_agent_next_version, - "local_scheme": "no-local-version", - "git_describe_command": "git describe --dirty --tags --long --match *.*.*", - "write_to": "newrelic/version.txt", - }, - setup_requires=["setuptools_scm>=3.2,<9"], - description="New Relic Python Agent", - long_description=readme_file_contents, - long_description_content_type="text/markdown", - url="https://docs.newrelic.com/docs/apm/agents/python-agent/", - project_urls={"Source": "https://github.com/newrelic/newrelic-python-agent"}, - author="New Relic", - author_email="support@newrelic.com", - maintainer="New Relic", - maintainer_email="support@newrelic.com", 
- license="Apache-2.0", - zip_safe=False, - classifiers=classifiers, - packages=packages, - python_requires=">=3.7", - package_data={ - "newrelic": ["newrelic.ini", "version.txt", "packages/urllib3/LICENSE.txt", "common/cacert.pem", "scripts/azure-prebuild.sh"], - }, - extras_require={"infinite-tracing": ["grpcio", "protobuf"]}, -) - -if with_setuptools: - kwargs["entry_points"] = { - "console_scripts": ["newrelic-admin = newrelic.admin:main"], - } -else: - kwargs["scripts"] = ["scripts/newrelic-admin"] + raise BuildExtFailed -def with_librt(): - try: - if sys.platform.startswith("linux"): - import ctypes.util +kwargs = { + "name": "newrelic", + "setup_requires": ["setuptools>=61.2", "setuptools_scm>=6.4,<10"], + "license": "Apache-2.0", +} - return ctypes.util.find_library("rt") - except Exception: - pass +if not with_setuptools: + script_directory = Path(__file__).parent + if not script_directory: + script_directory = Path.cwd() + + readme_file = script_directory / "README.md" + + kwargs["scripts"] = ["scripts/newrelic-admin"] + + # Old config that now lives in pyproject.toml + # Preserved here for backwards compatibility with distutils + packages = [ + "newrelic", + "newrelic.admin", + "newrelic.api", + "newrelic.bootstrap", + "newrelic.common", + "newrelic.core", + "newrelic.extras", + "newrelic.extras.framework_django", + "newrelic.extras.framework_django.templatetags", + "newrelic.hooks", + "newrelic.network", + "newrelic.packages", + "newrelic.packages.isort", + "newrelic.packages.isort.stdlibs", + "newrelic.packages.urllib3", + "newrelic.packages.urllib3.util", + "newrelic.packages.urllib3.contrib", + "newrelic.packages.urllib3.contrib._securetransport", + "newrelic.packages.urllib3.packages", + "newrelic.packages.urllib3.packages.backports", + "newrelic.packages.wrapt", + "newrelic.packages.opentelemetry_proto", + "newrelic.samplers", + ] + + kwargs.update( + { + "python_requires": ">=3.8", # python_requires is also located in pyproject.toml + "zip_safe": 
False, + "packages": packages, + "package_data": { + "newrelic": [ + "newrelic.ini", + "version.txt", + "packages/urllib3/LICENSE.txt", + "common/cacert.pem", + "scripts/azure-prebuild.sh", + ] + }, + } + ) def run_setup(with_extensions): @@ -211,19 +156,7 @@ def _run_setup(): Extension("newrelic.packages.wrapt._wrappers", ["newrelic/packages/wrapt/_wrappers.c"]), Extension("newrelic.core._thread_utilization", ["newrelic/core/_thread_utilization.c"]), ] - if not is_windows: - # This extension is only supported on POSIX platforms. - monotonic_libraries = [] - if with_librt(): - monotonic_libraries = ["rt"] - - kwargs_tmp["ext_modules"].append( - Extension( - "newrelic.common._monotonic", ["newrelic/common/_monotonic.c"], libraries=monotonic_libraries - ) - ) - - kwargs_tmp["cmdclass"] = dict(build_ext=optional_build_ext) + kwargs_tmp["cmdclass"] = {"build_ext": optional_build_ext} setup(**kwargs_tmp) @@ -256,9 +189,9 @@ def _run_setup(): with_extensions = os.environ.get("NEW_RELIC_EXTENSIONS", None) if with_extensions: - if with_extensions.lower() == "true": + if with_extensions.lower() in ["on", "true", "1"]: with_extensions = True - elif with_extensions.lower() == "false": + elif with_extensions.lower() in ["off", "false", "0"]: with_extensions = False else: with_extensions = None diff --git a/tests/adapter_mcp/conftest.py b/tests/adapter_mcp/conftest.py index 875d78abf2..6abc5b2896 100644 --- a/tests/adapter_mcp/conftest.py +++ b/tests/adapter_mcp/conftest.py @@ -22,6 +22,7 @@ "transaction_tracer.stack_trace_threshold": 0.0, "debug.log_data_collector_payloads": True, "debug.record_transaction_failure": True, + "ai_monitoring.enabled": True, } collector_agent_registration = collector_agent_registration_fixture( diff --git a/tests/adapter_mcp/test_mcp.py b/tests/adapter_mcp/test_mcp.py index 98330069bf..5ba6a81074 100644 --- a/tests/adapter_mcp/test_mcp.py +++ b/tests/adapter_mcp/test_mcp.py @@ -16,6 +16,9 @@ from fastmcp.client import Client from 
fastmcp.client.transports import FastMCPTransport from fastmcp.server.server import FastMCP +from mcp.server.fastmcp.tools import ToolManager +from testing_support.ml_testing_utils import disabled_ai_monitoring_settings +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task @@ -49,24 +52,43 @@ def echo_prompt(message: str): @validate_transaction_metrics( - "test_mcp:test_tool_tracing", + "test_mcp:test_tool_tracing_via_client_session", scoped_metrics=[("Llm/tool/MCP/mcp.client.session:ClientSession.call_tool/add_exclamation", 1)], rollup_metrics=[("Llm/tool/MCP/mcp.client.session:ClientSession.call_tool/add_exclamation", 1)], background_task=True, ) @background_task() -def test_tool_tracing(loop, fastmcp_server): +def test_tool_tracing_via_client_session(loop, fastmcp_server): async def _test(): async with Client(transport=FastMCPTransport(fastmcp_server)) as client: # Call the MCP tool, so we can validate the trace naming is correct. result = await client.call_tool("add_exclamation", {"phrase": "Python is awesome"}) - content = str(result.content[0]) assert "Python is awesome!" in content loop.run_until_complete(_test()) +@validate_transaction_metrics( + "test_mcp:test_tool_tracing_via_tool_manager", + scoped_metrics=[("Llm/tool/MCP/mcp.server.fastmcp.tools.tool_manager:ToolManager.call_tool/add_exclamation", 1)], + rollup_metrics=[("Llm/tool/MCP/mcp.server.fastmcp.tools.tool_manager:ToolManager.call_tool/add_exclamation", 1)], + background_task=True, +) +@background_task() +def test_tool_tracing_via_tool_manager(loop): + async def _test(): + def add_exclamation(phrase): + return f"{phrase}!" 
+ + manager = ToolManager() + manager.add_tool(add_exclamation) + result = await manager.call_tool("add_exclamation", {"phrase": "Python is awesome"}) + assert result == "Python is awesome!" + + loop.run_until_complete(_test()) + + # Separate out the test function to work with the transaction metrics validator def run_read_resources(loop, fastmcp_server, resource_uri): async def _test(): @@ -114,3 +136,38 @@ async def _test(): assert "Python is cool" in content loop.run_until_complete(_test()) + + +@disabled_ai_monitoring_settings +@validate_function_not_called("newrelic.api.function_trace", "FunctionTrace.__enter__") +@background_task() +def test_tool_tracing_aim_disabled(loop, fastmcp_server): + async def _test(): + async with Client(transport=FastMCPTransport(fastmcp_server)) as client: + # Call the MCP tool, so we can validate the trace naming is correct. + result = await client.call_tool("add_exclamation", {"phrase": "Python is awesome"}) + content = str(result.content[0]) + assert "Python is awesome!" 
in content + + loop.run_until_complete(_test()) + + +@disabled_ai_monitoring_settings +@validate_function_not_called("newrelic.api.function_trace", "FunctionTrace.__enter__") +@background_task() +def test_resource_tracing_aim_disabled(loop, fastmcp_server): + run_read_resources(loop, fastmcp_server, "greeting://Python") + + +@disabled_ai_monitoring_settings +@validate_function_not_called("newrelic.api.function_trace", "FunctionTrace.__enter__") +@background_task() +def test_prompt_tracing_aim_disabled(loop, fastmcp_server): + async def _test(): + async with Client(transport=FastMCPTransport(fastmcp_server)) as client: + result = await client.get_prompt("echo_prompt", {"message": "Python is cool"}) + + content = str(result) + assert "Python is cool" in content + + loop.run_until_complete(_test()) diff --git a/tests/agent_benchmarks/__init__.py b/tests/agent_benchmarks/__init__.py new file mode 100644 index 0000000000..97e9818c13 --- /dev/null +++ b/tests/agent_benchmarks/__init__.py @@ -0,0 +1,47 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ._agent_initialization import collector_agent_registration + +BENCHMARK_PREFIXES = ("time", "mem") +REPLACE_PREFIX = "bench_" + + +def benchmark(cls): + # Find all methods not prefixed with underscores and treat them as benchmark methods + benchmark_methods = { + name: method for name, method in vars(cls).items() if callable(method) and name.startswith(REPLACE_PREFIX) + } + + # Remove setup function from benchmark methods and save it + cls._setup = benchmark_methods.pop("setup", None) + + # Patch in benchmark methods for each prefix + for name, method in benchmark_methods.items(): + name = name[len(REPLACE_PREFIX) :] # Remove "bench_" prefix + for prefix in BENCHMARK_PREFIXES: + setattr(cls, f"{prefix}_{name}", method) + + # Define agent activation as setup function + def setup(self): + collector_agent_registration(self) + + # Call the original setup if it exists + if getattr(self, "_setup", None) is not None: + self._setup() + + # Patch in new setup method + cls.setup = setup + + return cls diff --git a/tests/agent_benchmarks/_agent_initialization.py b/tests/agent_benchmarks/_agent_initialization.py new file mode 100644 index 0000000000..65a0ccb9aa --- /dev/null +++ b/tests/agent_benchmarks/_agent_initialization.py @@ -0,0 +1,70 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +from pathlib import Path +from threading import Lock + +# Amend sys.path to allow importing fixtures from testing_support +tests_path = Path(__file__).parent.parent +sys.path.append(str(tests_path)) + +from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture # noqa: E402 + +_default_settings = { + "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. + "transaction_tracer.explain_threshold": 0.0, + "transaction_tracer.transaction_threshold": 0.0, + "transaction_tracer.stack_trace_threshold": 0.0, + "debug.log_data_collector_payloads": True, + "debug.record_transaction_failure": True, +} + +_collector_agent_registration_fixture = collector_agent_registration_fixture( + app_name="Python Agent Test (benchmarks)", default_settings=_default_settings +) + +INITIALIZATION_LOCK = Lock() +APPLICATIONS = [] + +DISALLOWED_ENV_VARS = ("NEW_RELIC_CONFIG_FILE", "NEW_RELIC_LICENSE_KEY") + + +def collector_agent_registration(instance): + # If the application is already registered, exit early + if APPLICATIONS: + instance.application = APPLICATIONS[0] # Make application accessible to benchmarks + return + + # Register the agent with the collector using the pytest fixture manually + with INITIALIZATION_LOCK: + if APPLICATIONS: # Must re-check this condition just in case + instance.application = APPLICATIONS[0] # Make application accessible to benchmarks + return + + # Force benchmarking to always use developer mode + os.environ["NEW_RELIC_DEVELOPER_MODE"] = "true" # Force developer mode + for env_var in DISALLOWED_ENV_VARS: # Drop disallowed env vars + os.environ.pop(env_var, None) + + # Use pytest fixture by hand to start the agent + fixture = _collector_agent_registration_fixture() + APPLICATIONS.append(next(fixture)) + + # Wait for the application to become active + collector_available_fixture(APPLICATIONS[0]) + + # Make application accessible to benchmarks + 
instance.application = APPLICATIONS[0] diff --git a/newrelic/api/pre_function.py b/tests/agent_benchmarks/bench_agent_active.py similarity index 52% rename from newrelic/api/pre_function.py rename to tests/agent_benchmarks/bench_agent_active.py index e5df8e9b64..de7a695e44 100644 --- a/newrelic/api/pre_function.py +++ b/tests/agent_benchmarks/bench_agent_active.py @@ -12,6 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Use of these from this module will be deprecated. +from newrelic.agent import background_task, current_transaction -from newrelic.common.object_wrapper import PreFunctionWrapper, pre_function, wrap_pre_function # noqa: F401 +from . import benchmark + +# This benchmark suite is a placeholder until actual benchmark suites can be added. +# For now, this ensures the infrastructure works as intended. + + +@benchmark +class Suite: + def bench_application_active(self): + from newrelic.agent import application + + assert application().active + + @background_task() + def bench_transaction_active(self): + from newrelic.agent import application + + assert current_transaction() diff --git a/tests/agent_features/test_asgi_distributed_tracing.py b/tests/agent_features/test_asgi_distributed_tracing.py index 35a7448125..ae50d45cf3 100644 --- a/tests/agent_features/test_asgi_distributed_tracing.py +++ b/tests/agent_features/test_asgi_distributed_tracing.py @@ -180,7 +180,8 @@ def _make_test_transaction(): ) def _test(): with _make_test_transaction() as transaction: - transaction.accept_distributed_trace_payload(dt_payload) + dt_headers = {"newrelic": dt_payload} + transaction.accept_distributed_trace_headers(dt_headers) if gen_error: try: diff --git a/tests/agent_features/test_async_context_propagation.py b/tests/agent_features/test_async_context_propagation.py index eb7a1fcb66..12876dc7b9 100644 --- a/tests/agent_features/test_async_context_propagation.py +++ 
b/tests/agent_features/test_async_context_propagation.py @@ -13,7 +13,8 @@ # limitations under the License. import pytest -from testing_support.fixtures import function_not_called, override_generic_settings +from testing_support.fixtures import override_generic_settings +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.application import application_instance as application @@ -128,7 +129,7 @@ def handle_exception(loop, context): @override_generic_settings(global_settings(), {"enabled": False}) -@function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") +@validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def test_nr_disabled(event_loop): import asyncio diff --git a/tests/agent_features/test_attributes_in_action.py b/tests/agent_features/test_attributes_in_action.py index 0b24a3100a..4d12e8b059 100644 --- a/tests/agent_features/test_attributes_in_action.py +++ b/tests/agent_features/test_attributes_in_action.py @@ -14,6 +14,7 @@ import pytest import webtest +from testing_support.asgi_testing import AsgiTest from testing_support.fixtures import ( cat_enabled, dt_enabled, @@ -21,6 +22,7 @@ reset_core_stats_engine, validate_attributes, ) +from testing_support.sample_asgi_applications import normal_asgi_application from testing_support.validators.validate_browser_attributes import validate_browser_attributes from testing_support.validators.validate_error_event_attributes import validate_error_event_attributes from testing_support.validators.validate_error_event_attributes_outside_transaction import ( @@ -45,13 +47,6 @@ from newrelic.api.wsgi_application import wsgi_application from newrelic.common.object_names import callable_name -try: - from testing_support.asgi_testing import AsgiTest - from testing_support.sample_asgi_applications import 
normal_asgi_application -except SyntaxError: - normal_asgi_application = None - - URL_PARAM = "some_key" URL_PARAM2 = "second_key" REQUEST_URL = f"/?{URL_PARAM}=someval&{URL_PARAM2}=anotherval" @@ -160,12 +155,7 @@ def normal_wsgi_application(environ, start_response): return [output] -application_params = [normal_wsgi_application] -if normal_asgi_application: - application_params.append(normal_asgi_application) - - -@pytest.fixture(scope="module", params=application_params) +@pytest.fixture(scope="module", params=[normal_wsgi_application, normal_asgi_application]) def normal_application(request): if request.param is normal_wsgi_application: return webtest.TestApp(normal_wsgi_application) diff --git a/tests/agent_features/test_configuration.py b/tests/agent_features/test_configuration.py index 41d929f131..4dd0726a82 100644 --- a/tests/agent_features/test_configuration.py +++ b/tests/agent_features/test_configuration.py @@ -363,21 +363,27 @@ def test_strip_proxy_details(settings): assert proxy_host == expected_proxy_host -def test_delete_setting(): - d = {"transaction_tracer.capture_attributes": True} - settings = apply_server_side_settings(d) - assert "capture_attributes" in settings.transaction_tracer +# TODO: Reenable once newly deprecated settings have been +# been put into the `deprecated_settings_map` +# def test_delete_setting(): +# """This test applies to a deprecated setting +# """ +# d = {"transaction_tracer.explain_enabled": True} +# settings = apply_server_side_settings(d) +# assert "explain_enabled" in settings.transaction_tracer - delete_setting(settings, "transaction_tracer.capture_attributes") - assert "capture_attributes" not in settings.transaction_tracer +# delete_setting(settings, "transaction_tracer.explain_enabled") +# assert "explain_enabled" not in settings.transaction_tracer -def test_delete_setting_absent(): - settings = apply_server_side_settings() - assert "capture_attributes" not in settings.transaction_tracer +# def 
test_delete_setting_absent(): +# """This test applies to a deprecated setting +# """ +# settings = apply_server_side_settings() +# assert "explain_enabled" not in settings.transaction_tracer - delete_setting(settings, "transaction_tracer.capture_attributes") - assert "capture_attributes" not in settings.transaction_tracer +# delete_setting(settings, "transaction_tracer.explain_enabled") +# assert "explain_enabled" not in settings.transaction_tracer def test_delete_setting_parent(): @@ -443,59 +449,51 @@ def test_delete_setting_parent(): (TSetting("analytics_events.enabled", False, True), TSetting("transaction_events.enabled", True, True)), ( TSetting("analytics_events.max_samples_stored", 1200, 1200), - TSetting("event_harvest_config.harvest_limits.analytic_event_data", 9999, 1200), + TSetting("transaction_events.max_samples_stored", 9999, 1200), ), ( TSetting("analytics_events.max_samples_stored", 9999, 1200), - TSetting("event_harvest_config.harvest_limits.analytic_event_data", 1200, 1200), - ), - ( TSetting("transaction_events.max_samples_stored", 1200, 1200), - TSetting("event_harvest_config.harvest_limits.analytic_event_data", 9999, 1200), ), ( - TSetting("transaction_events.max_samples_stored", 9999, 1200), TSetting("event_harvest_config.harvest_limits.analytic_event_data", 1200, 1200), + TSetting("transaction_events.max_samples_stored", 9999, 1200), ), ( - TSetting("span_events.max_samples_stored", 1000, 2000), - TSetting("event_harvest_config.harvest_limits.span_event_data", 9999, 2000), + TSetting("event_harvest_config.harvest_limits.analytic_event_data", 9999, 1200), + TSetting("transaction_events.max_samples_stored", 1200, 1200), ), ( - TSetting("span_events.max_samples_stored", 9999, 2000), TSetting("event_harvest_config.harvest_limits.span_event_data", 1000, 2000), + TSetting("span_events.max_samples_stored", 9999, 2000), ), ( - TSetting("error_collector.max_event_samples_stored", 100, 100), - 
TSetting("event_harvest_config.harvest_limits.error_event_data", 9999, 100), + TSetting("event_harvest_config.harvest_limits.span_event_data", 9999, 2000), + TSetting("span_events.max_samples_stored", 1000, 2000), ), ( - TSetting("error_collector.max_event_samples_stored", 9999, 100), TSetting("event_harvest_config.harvest_limits.error_event_data", 100, 100), + TSetting("error_collector.max_event_samples_stored", 9999, 100), ), ( - TSetting("custom_insights_events.max_samples_stored", 3600, 3600), - TSetting("event_harvest_config.harvest_limits.custom_event_data", 9999, 3600), + TSetting("event_harvest_config.harvest_limits.error_event_data", 9999, 100), + TSetting("error_collector.max_event_samples_stored", 100, 100), ), ( - TSetting("custom_insights_events.max_samples_stored", 9999, 3600), TSetting("event_harvest_config.harvest_limits.custom_event_data", 3600, 3600), + TSetting("custom_insights_events.max_samples_stored", 9999, 3600), ), ( - TSetting("application_logging.forwarding.max_samples_stored", 10000, 10000), - TSetting("event_harvest_config.harvest_limits.log_event_data", 99999, 10000), + TSetting("event_harvest_config.harvest_limits.custom_event_data", 9999, 3600), + TSetting("custom_insights_events.max_samples_stored", 3600, 3600), ), ( - TSetting("application_logging.forwarding.max_samples_stored", 99999, 10000), TSetting("event_harvest_config.harvest_limits.log_event_data", 10000, 10000), + TSetting("application_logging.forwarding.max_samples_stored", 99999, 10000), ), ( - TSetting("error_collector.ignore_errors", [], []), - TSetting("error_collector.ignore_classes", callable_name(ValueError), []), - ), - ( - TSetting("error_collector.ignore_errors", callable_name(ValueError), []), - TSetting("error_collector.ignore_classes", [], []), + TSetting("event_harvest_config.harvest_limits.log_event_data", 99999, 10000), + TSetting("application_logging.forwarding.max_samples_stored", 10000, 10000), ), ] @@ -567,46 +565,6 @@ def 
test_translate_deprecated_setting_without_old_setting(old, new): assert fetch_config_setting(result, new.name) == new.value -def test_translate_deprecated_ignored_params_without_new_setting(): - ignored_params = ["foo", "bar"] - settings = apply_server_side_settings() - apply_config_setting(settings, "ignored_params", ignored_params) - - assert "foo" in settings.ignored_params - assert "bar" in settings.ignored_params - assert len(settings.attributes.exclude) == 0 - - cached = [("ignored_params", ignored_params)] - result = translate_deprecated_settings(settings, cached) - - assert result is settings - assert "request.parameters.foo" in result.attributes.exclude - assert "request.parameters.bar" in result.attributes.exclude - assert "ignored_params" not in result - - -def test_translate_deprecated_ignored_params_with_new_setting(): - ignored_params = ["foo", "bar"] - attr_exclude = ["request.parameters.foo"] - settings = apply_server_side_settings() - apply_config_setting(settings, "ignored_params", ignored_params) - apply_config_setting(settings, "attributes.exclude", attr_exclude) - - assert "foo" in settings.ignored_params - assert "bar" in settings.ignored_params - assert "request.parameters.foo" in settings.attributes.exclude - - cached = [("ignored_params", ignored_params), ("attributes.exclude", attr_exclude)] - result = translate_deprecated_settings(settings, cached) - - # ignored_params are not merged! 
- - assert result is settings - assert "request.parameters.foo" in result.attributes.exclude - assert "request.parameters.bar" not in result.attributes.exclude - assert "ignored_params" not in result - - @pytest.mark.parametrize( "name,expected_value", ( @@ -1002,7 +960,7 @@ def test_map_aws_account_id(account_id, expected_account_id, logger): [tool.newrelic.error_collector] enabled = true -ignore_errors = ["module:name1", "module:name"] +ignore_classes = ["module:name1", "module:name"] [tool.newrelic.transaction_tracer] enabled = true diff --git a/tests/agent_features/test_custom_events.py b/tests/agent_features/test_custom_events.py index e799ed507b..968d5a5b72 100644 --- a/tests/agent_features/test_custom_events.py +++ b/tests/agent_features/test_custom_events.py @@ -15,12 +15,13 @@ import time import pytest -from testing_support.fixtures import function_not_called, override_application_settings, reset_core_stats_engine +from testing_support.fixtures import override_application_settings, reset_core_stats_engine from testing_support.validators.validate_custom_event import ( validate_custom_event_count, validate_custom_event_in_application_stats_engine, ) from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_function_not_called import validate_function_not_called from newrelic.api.application import application_instance as application from newrelic.api.background_task import background_task @@ -214,14 +215,14 @@ def test_custom_event_settings_check_custom_insights_enabled(): @override_application_settings({"custom_insights_events.enabled": False}) -@function_not_called("newrelic.api.transaction", "create_custom_event") +@validate_function_not_called("newrelic.api.transaction", "create_custom_event") @background_task() def test_transaction_create_custom_event_not_called(): record_custom_event("FooEvent", _user_params) @override_application_settings({"custom_insights_events.enabled": False}) 
-@function_not_called("newrelic.core.application", "create_custom_event") +@validate_function_not_called("newrelic.core.application", "create_custom_event") @background_task() def test_application_create_custom_event_not_called(): app = application() diff --git a/tests/agent_features/test_distributed_tracing.py b/tests/agent_features/test_distributed_tracing.py index 502b3828b0..36261d97e2 100644 --- a/tests/agent_features/test_distributed_tracing.py +++ b/tests/agent_features/test_distributed_tracing.py @@ -17,8 +17,10 @@ import pytest import webtest -from testing_support.fixtures import override_application_settings, validate_attributes +from testing_support.fixtures import override_application_settings, validate_attributes, validate_attributes_complete from testing_support.validators.validate_error_event_attributes import validate_error_event_attributes +from testing_support.validators.validate_function_called import validate_function_called +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -28,14 +30,14 @@ from newrelic.api.time_trace import current_trace from newrelic.api.transaction import ( accept_distributed_trace_headers, - accept_distributed_trace_payload, - create_distributed_trace_payload, current_span_id, current_trace_id, current_transaction, + insert_distributed_trace_headers, ) from newrelic.api.web_transaction import WSGIWebTransaction from newrelic.api.wsgi_application import wsgi_application +from newrelic.core.attribute import Attribute distributed_trace_intrinsics = ["guid", "traceId", "priority", "sampled"] inbound_payload_intrinsics = [ @@ -181,10 +183,13 @@ def _test(): payload["d"]["pa"] = "5e5733a911cfbc73" if accept_payload: - result = accept_distributed_trace_payload(payload) + 
headers = {"newrelic": payload} + result = accept_distributed_trace_headers(headers) assert result else: - create_distributed_trace_payload() + headers = [] + insert_distributed_trace_headers(headers) + assert headers try: raise ValueError("cookies") @@ -261,7 +266,8 @@ def _make_test_transaction(): ) def _test(): with _make_test_transaction() as transaction: - transaction.accept_distributed_trace_payload(dt_payload) + dt_headers = {"newrelic": dt_payload} + transaction.accept_distributed_trace_headers(dt_headers) if gen_error: try: @@ -402,11 +408,73 @@ def _test_inbound_dt_payload_acceptance(): "tx": "8703ff3d88eefe9d", }, } - - result = transaction.accept_distributed_trace_payload(payload) + headers = {"newrelic": payload} + result = transaction.accept_distributed_trace_headers(headers) if trusted_account_key: assert result else: assert not result _test_inbound_dt_payload_acceptance() + + +@pytest.mark.parametrize( + "sampled,remote_parent_sampled,remote_parent_not_sampled,expected_sampled,expected_priority,expected_adaptive_sampling_algo_called", + ( + (True, "default", "default", None, None, True), # Uses sampling algo. + (True, "always_on", "default", True, 2, False), # Always sampled. + (True, "always_off", "default", False, 0, False), # Never sampled. + (False, "default", "default", None, None, True), # Uses sampling algo. + (False, "always_on", "default", None, None, True), # Uses sampling alog. + (False, "always_off", "default", None, None, True), # Uses sampling algo. + (True, "default", "always_on", None, None, True), # Uses sampling algo. + (True, "default", "always_off", None, None, True), # Uses sampling algo. + (False, "default", "always_on", True, 2, False), # Always sampled. + (False, "default", "always_off", False, 0, False), # Never sampled. 
+ ), +) +def test_distributed_trace_w3cparent_sampling_decision( + sampled, + remote_parent_sampled, + remote_parent_not_sampled, + expected_sampled, + expected_priority, + expected_adaptive_sampling_algo_called, +): + required_intrinsics = [] + if expected_sampled is not None: + required_intrinsics.append(Attribute(name="sampled", value=expected_sampled, destinations=0b110)) + if expected_priority is not None: + required_intrinsics.append(Attribute(name="priority", value=expected_priority, destinations=0b110)) + + test_settings = _override_settings.copy() + test_settings.update( + { + "distributed_tracing.sampler.remote_parent_sampled": remote_parent_sampled, + "distributed_tracing.sampler.remote_parent_not_sampled": remote_parent_not_sampled, + "span_events.enabled": True, + } + ) + if expected_adaptive_sampling_algo_called: + function_called_decorator = validate_function_called( + "newrelic.api.transaction", "Transaction.sampling_algo_compute_sampled_and_priority" + ) + else: + function_called_decorator = validate_function_not_called( + "newrelic.api.transaction", "Transaction.sampling_algo_compute_sampled_and_priority" + ) + + @function_called_decorator + @override_application_settings(test_settings) + @validate_attributes_complete("intrinsic", required_intrinsics) + @background_task(name="test_distributed_trace_attributes") + def _test(): + txn = current_transaction() + + headers = { + "traceparent": f"00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-{int(sampled):02x}", + "tracestate": "rojo=f06a0ba902b7,congo=t61rcWkgMzE", + } + accept_distributed_trace_headers(headers) + + _test() diff --git a/tests/agent_features/test_lambda_handler.py b/tests/agent_features/test_lambda_handler.py deleted file mode 100644 index f6160564de..0000000000 --- a/tests/agent_features/test_lambda_handler.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright 2010 New Relic, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -from copy import deepcopy - -import pytest -from testing_support.fixtures import override_application_settings -from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes -from testing_support.validators.validate_transaction_trace_attributes import validate_transaction_trace_attributes - -from newrelic.api import lambda_handler - - -# NOTE: this fixture will force all tests in this file to assume that a cold -# start has occurred, *except* when a test has a parameter named -# "is_cold" and its value is True -@pytest.fixture(autouse=True) -def force_cold_start_status(request): - try: - is_cold_start = request.getfixturevalue("is_cold") - lambda_handler.COLD_START_RECORDED = not is_cold_start - except Exception: - lambda_handler.COLD_START_RECORDED = True - - -@lambda_handler.lambda_handler() -def handler(event, context): - return {"statusCode": "200", "body": "{}", "headers": {"Content-Type": "application/json", "Content-Length": 2}} - - -_override_settings = {"attributes.include": ["request.parameters.*", "request.headers.*"]} -_expected_attributes = { - "agent": [ - "aws.requestId", - "aws.lambda.arn", - "request.method", - "request.uri", - "response.status", - "response.headers.contentType", - "response.headers.contentLength", - ], - "user": [], - "intrinsic": [], -} - -_exact_attrs = { - "agent": {"request.parameters.foo": "bar", 
"request.headers.host": "myhost"}, - "user": {}, - "intrinsic": {}, -} - -empty_event = {} -firehose_event = { - "records": [ - { - "recordId": "495469866831355442", - "data": "SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=", - "approximateArrivalTimestamp": 1495072949453, - } - ], - "region": "us-west-2", - "deliveryStreamArn": "arn:aws:kinesis:EXAMPLE", - "invocationId": "invocationIdExample", -} - - -class Context: - aws_request_id = "cookies" - invoked_function_arn = "arn" - function_name = "cats" - function_version = "$LATEST" - memory_limit_in_mb = 128 - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@pytest.mark.parametrize("is_cold", (False, True)) -def test_lambda_transaction_attributes(is_cold, monkeypatch): - # setup copies of the attribute lists for this test only - _forgone_params = {} - _exact = deepcopy(_exact_attrs) - _expected = deepcopy(_expected_attributes) - - # if we have a cold start, then we should see aws.lambda.coldStart=True - if is_cold: - _exact["agent"]["aws.lambda.coldStart"] = True - _expected["agent"].append("aws.lambda.coldStart") - - # otherwise, then we need to make sure that we don't see it at all - else: - _forgone_params = {"agent": ["aws.lambda.coldStart"], "user": [], "intrinsic": []} - - @validate_transaction_trace_attributes(required_params=_expected, forgone_params=_forgone_params) - @validate_transaction_event_attributes( - required_params=_expected, forgone_params=_forgone_params, exact_attrs=_exact - ) - @override_application_settings(_override_settings) - def _test(): - monkeypatch.setenv("AWS_REGION", "earth") - handler( - { - "httpMethod": "GET", - "path": "/", - "headers": {"HOST": "myhost"}, - "queryStringParameters": {"foo": "bar"}, - "multiValueQueryStringParameters": {"foo": ["bar"]}, - }, - Context, - ) - - _test() - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") 
-@validate_transaction_trace_attributes(_expected_attributes) -@validate_transaction_event_attributes(_expected_attributes) -@override_application_settings(_override_settings) -def test_lambda_malformed_api_gateway_payload(monkeypatch): - monkeypatch.setenv("AWS_REGION", "earth") - handler( - { - "httpMethod": "GET", - "path": "/", - "headers": {}, - "queryStringParameters": 42, - "multiValueQueryStringParameters": 42, - }, - Context, - ) - - -_malformed_request_attributes = {"agent": ["aws.requestId", "aws.lambda.arn"], "user": [], "intrinsic": []} - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@validate_transaction_trace_attributes(_malformed_request_attributes) -@validate_transaction_event_attributes(_malformed_request_attributes) -@override_application_settings(_override_settings) -def test_lambda_malformed_request_headers(): - handler({"httpMethod": "GET", "path": "/", "headers": None}, Context) - - -_malformed_response_attributes = { - "agent": ["aws.requestId", "aws.lambda.arn", "request.method", "request.uri", "response.status"], - "user": [], - "intrinsic": [], -} - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@validate_transaction_trace_attributes(_malformed_response_attributes) -@validate_transaction_event_attributes(_malformed_response_attributes) -@override_application_settings(_override_settings) -def test_lambda_malformed_response_headers(): - @lambda_handler.lambda_handler() - def handler(event, context): - return {"statusCode": 200, "body": "{}", "headers": None} - - handler({"httpMethod": "GET", "path": "/", "headers": {}}, Context) - - -_no_status_code_response = { - "agent": [ - "aws.requestId", - "aws.lambda.arn", - "request.method", - "request.uri", - "response.headers.contentType", - "response.headers.contentLength", - ], - "user": [], - "intrinsic": [], -} - - -# The lambda_hander has 
been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@validate_transaction_trace_attributes(_no_status_code_response) -@validate_transaction_event_attributes(_no_status_code_response) -@override_application_settings(_override_settings) -def test_lambda_no_status_code_response(): - @lambda_handler.lambda_handler() - def handler(event, context): - return {"body": "{}", "headers": {"Content-Type": "application/json", "Content-Length": 2}} - - handler({"httpMethod": "GET", "path": "/", "headers": {}}, Context) - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@pytest.mark.parametrize("event,arn", ((empty_event, None), (firehose_event, "arn:aws:kinesis:EXAMPLE"))) -def test_lambda_event_source_arn_attribute(event, arn): - if arn is None: - _exact = None - _expected = None - _forgone = {"user": [], "intrinsic": [], "agent": ["aws.lambda.eventSource.arn"]} - else: - _exact = {"user": {}, "intrinsic": {}, "agent": {"aws.lambda.eventSource.arn": arn}} - _expected = {"user": [], "intrinsic": [], "agent": ["aws.lambda.eventSource.arn"]} - _forgone = None - - @validate_transaction_trace_attributes(required_params=_expected, forgone_params=_forgone) - @validate_transaction_event_attributes(required_params=_expected, forgone_params=_forgone, exact_attrs=_exact) - @override_application_settings(_override_settings) - def _test(): - handler(event, Context) - - _test() - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@pytest.mark.parametrize( - "api", (lambda_handler.lambda_handler, functools.partial(lambda_handler.LambdaHandlerWrapper, handler)) -) -def test_deprecation_warnings(api): - with pytest.deprecated_call(): - api() diff --git a/tests/agent_features/test_ml_events.py b/tests/agent_features/test_ml_events.py index 536cbca4ca..6d0edc5da2 100644 --- 
a/tests/agent_features/test_ml_events.py +++ b/tests/agent_features/test_ml_events.py @@ -16,7 +16,8 @@ from importlib import reload import pytest -from testing_support.fixtures import function_not_called, override_application_settings, reset_core_stats_engine +from testing_support.fixtures import override_application_settings, reset_core_stats_engine +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_ml_event_count import validate_ml_event_count from testing_support.validators.validate_ml_event_payload import validate_ml_event_payload from testing_support.validators.validate_ml_events import validate_ml_events @@ -331,7 +332,7 @@ def test_ml_event_settings_check_ml_insights_disabled(): @override_application_settings({"ml_insights_events.enabled": False}) @reset_core_stats_engine() -@function_not_called("newrelic.api.transaction", "create_custom_event") +@validate_function_not_called("newrelic.api.transaction", "create_custom_event") @background_task() def test_transaction_create_ml_event_not_called(): record_ml_event("FooEvent", {"foo": "bar"}) @@ -339,7 +340,7 @@ def test_transaction_create_ml_event_not_called(): @override_application_settings({"ml_insights_events.enabled": False}) @reset_core_stats_engine() -@function_not_called("newrelic.core.application", "create_custom_event") +@validate_function_not_called("newrelic.core.application", "create_custom_event") @background_task() def test_application_create_ml_event_not_called(): app = application() diff --git a/tests/agent_features/test_notice_error.py b/tests/agent_features/test_notice_error.py index 1240dea1e8..5744dcc61a 100644 --- a/tests/agent_features/test_notice_error.py +++ b/tests/agent_features/test_notice_error.py @@ -411,7 +411,7 @@ def test_transaction_error_event_limit(): @override_application_settings( { "agent_limits.errors_per_harvest": _errors_per_harvest_limit, - 
"event_harvest_config.harvest_limits.error_event_data": _error_event_limit, + "error_collector.max_event_samples_stored": _error_event_limit, } ) @reset_core_stats_engine() diff --git a/tests/agent_features/test_priority_sampling.py b/tests/agent_features/test_priority_sampling.py index 6ef697c12c..ceb81564b4 100644 --- a/tests/agent_features/test_priority_sampling.py +++ b/tests/agent_features/test_priority_sampling.py @@ -24,7 +24,7 @@ from newrelic.api.background_task import BackgroundTask -@override_application_settings({"event_harvest_config.harvest_limits.analytic_event_data": 1}) +@override_application_settings({"transaction_events.max_samples_stored": 1}) @pytest.mark.parametrize("first_transaction_saved", [True, False]) def test_priority_used_in_transaction_events(first_transaction_saved): first_priority = 1 if first_transaction_saved else 0 @@ -57,7 +57,7 @@ def _test(): _test() -@override_application_settings({"event_harvest_config.harvest_limits.error_event_data": 1}) +@override_application_settings({"error_collector.max_event_samples_stored": 1}) @pytest.mark.parametrize("first_transaction_saved", [True, False]) def test_priority_used_in_transaction_error_events(first_transaction_saved): first_priority = 1 if first_transaction_saved else 0 @@ -97,7 +97,7 @@ def _test(): _test() -@override_application_settings({"event_harvest_config.harvest_limits.custom_event_data": 1}) +@override_application_settings({"custom_insights_events.max_samples_stored": 1}) @pytest.mark.parametrize("first_transaction_saved", [True, False]) def test_priority_used_in_transaction_custom_events(first_transaction_saved): first_priority = 1 if first_transaction_saved else 0 diff --git a/tests/agent_features/test_serverless_mode.py b/tests/agent_features/test_serverless_mode.py index 27ffd314f4..048b96aa23 100644 --- a/tests/agent_features/test_serverless_mode.py +++ b/tests/agent_features/test_serverless_mode.py @@ -23,7 +23,6 @@ from newrelic.api.application import 
application_instance from newrelic.api.background_task import background_task from newrelic.api.external_trace import ExternalTrace -from newrelic.api.lambda_handler import lambda_handler from newrelic.api.transaction import current_transaction from newrelic.core.config import global_settings @@ -57,7 +56,7 @@ def _test(): _test() - out, err = capsys.readouterr() + out, _err = capsys.readouterr() # Validate that something is printed to stdout assert out @@ -135,39 +134,8 @@ def _test_inbound_dt_payload_acceptance(): "tx": "8703ff3d88eefe9d", }, } - - result = transaction.accept_distributed_trace_payload(payload) + headers = {"newrelic": payload} + result = transaction.accept_distributed_trace_headers(headers) assert result _test_inbound_dt_payload_acceptance() - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@pytest.mark.parametrize("arn_set", (True, False)) -def test_payload_metadata_arn(serverless_application, arn_set): - # If the session object gathers the arn from the settings object before the - # lambda handler records it there, then this test will fail. 
- - settings = global_settings() - original_metadata = settings.aws_lambda_metadata.copy() - - arn = None - if arn_set: - arn = "arrrrrrrrrrRrrrrrrrn" - - settings.aws_lambda_metadata.update({"arn": arn, "function_version": "$LATEST"}) - - class Context: - invoked_function_arn = arn - - @validate_serverless_metadata(exact_metadata={"arn": arn}) - @lambda_handler(application=serverless_application) - def handler(event, context): - assert settings.aws_lambda_metadata["arn"] == arn - return {} - - try: - handler({}, Context) - finally: - settings.aws_lambda_metadata = original_metadata diff --git a/tests/agent_features/test_span_events.py b/tests/agent_features/test_span_events.py index 2d49ae01c6..fe19dacd07 100644 --- a/tests/agent_features/test_span_events.py +++ b/tests/agent_features/test_span_events.py @@ -15,7 +15,8 @@ import sys import pytest -from testing_support.fixtures import dt_enabled, function_not_called, override_application_settings +from testing_support.fixtures import dt_enabled, override_application_settings +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -415,7 +416,7 @@ def _test(): pass if not spans_expected: - _test = function_not_called("newrelic.core.attribute", "resolve_agent_attributes")(_test) + _test = validate_function_not_called("newrelic.core.attribute", "resolve_agent_attributes")(_test) _test() @@ -516,9 +517,9 @@ def _test(): def test_span_user_attribute_overrides_transaction_attribute(): transaction = current_transaction() - transaction.add_custom_parameter("foo", "a") + transaction.add_custom_attribute("foo", "a") add_custom_span_attribute("foo", "b") - 
transaction.add_custom_parameter("foo", "c") + transaction.add_custom_attribute("foo", "c") @override_application_settings({"attributes.include": "*"}) @@ -563,7 +564,7 @@ def _test(): transaction = current_transaction() for i in range(128): - transaction.add_custom_parameter(f"txn_attr{i}", "txnValue") + transaction.add_custom_attribute(f"txn_attr{i}", "txnValue") if i < 64: add_custom_span_attribute(f"span_attr{i}", "spanValue") diff --git a/tests/agent_unittests/test_connect_response_fields.py b/tests/agent_unittests/test_connect_response_fields.py index cb8c71d11f..9d6a423773 100644 --- a/tests/agent_unittests/test_connect_response_fields.py +++ b/tests/agent_unittests/test_connect_response_fields.py @@ -140,7 +140,7 @@ def test_span_event_harvest_config(connect_response_fields): from newrelic.core.config import SPAN_EVENT_RESERVOIR_SIZE expected = SPAN_EVENT_RESERVOIR_SIZE - assert protocol.configuration.event_harvest_config.harvest_limits.span_event_data == expected + assert protocol.configuration.span_events.max_samples_stored == expected @override_generic_settings(global_settings(), {"developer_mode": True}) diff --git a/tests/agent_unittests/test_harvest_loop.py b/tests/agent_unittests/test_harvest_loop.py index 0439ba1650..b9c1ea25b8 100644 --- a/tests/agent_unittests/test_harvest_loop.py +++ b/tests/agent_unittests/test_harvest_loop.py @@ -18,7 +18,8 @@ from pathlib import Path import pytest -from testing_support.fixtures import failing_endpoint, function_not_called, override_generic_settings +from testing_support.fixtures import failing_endpoint, override_generic_settings +from testing_support.validators.validate_function_not_called import validate_function_not_called from newrelic.common.agent_http import DeveloperModeClient from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper @@ -348,7 +349,7 @@ def test_application_harvest_with_spans(distributed_tracing_enabled, span_events "license_key": "**NOT A LICENSE KEY**", 
"distributed_tracing.enabled": distributed_tracing_enabled, "span_events.enabled": span_events_enabled, - "event_harvest_config.harvest_limits.span_event_data": max_samples_stored, + "span_events.max_samples_stored": max_samples_stored, }, ) def _test(): @@ -513,10 +514,10 @@ def test_adaptive_sampling(transaction_node, monkeypatch): "feature_flag": set(), "distributed_tracing.enabled": True, "application_logging.forwarding.enabled": True, - "event_harvest_config.harvest_limits.error_event_data": 1000, - "event_harvest_config.harvest_limits.span_event_data": 1000, - "event_harvest_config.harvest_limits.custom_event_data": 1000, - "event_harvest_config.harvest_limits.log_event_data": 1000, + "error_collector.max_event_samples_stored": 1000, + "span_events.max_samples_stored": 1000, + "custom_insights_events.max_samples_stored": 1000, + "application_logging.forwarding.max_samples_stored": 1000, }, ) def test_reservoir_sizes(transaction_node): @@ -536,13 +537,13 @@ def test_reservoir_sizes(transaction_node): @pytest.mark.parametrize( - "harvest_name, event_name", + "harvest_setting,event_name", [ - ("analytic_event_data", "transaction_events"), - ("error_event_data", "error_events"), - ("custom_event_data", "custom_events"), - ("log_event_data", "log_events"), - ("span_event_data", "span_events"), + ("transaction_events.max_samples_stored", "transaction_events"), + ("error_collector.max_event_samples_stored", "error_events"), + ("custom_insights_events.max_samples_stored", "custom_events"), + ("application_logging.forwarding.max_samples_stored", "log_events"), + ("span_events.max_samples_stored", "span_events"), ], ) @override_generic_settings( @@ -554,11 +555,18 @@ def test_reservoir_sizes(transaction_node): "distributed_tracing.enabled": True, }, ) -def test_reservoir_size_zeros(harvest_name, event_name): +def test_reservoir_size_zeros(harvest_setting, event_name): app = Application("Python Agent Test (Harvest Loop)") app.connect_to_data_collector(None) - 
setattr(settings.event_harvest_config.harvest_limits, harvest_name, 0) + # Walk down the settings tree until the 2nd to last setting name is reached to get the + # settings container, then set the final setting on that container to 0 + harvest_setting = list(harvest_setting.split(".")) + _settings = settings + for setting_attr in harvest_setting[:-1]: + _settings = getattr(_settings, setting_attr) + setattr(_settings, harvest_setting[-1], 0) + settings.event_harvest_config.allowlist = frozenset(()) app._stats_engine.reset_stats(settings) @@ -606,7 +614,7 @@ def test_error_event_sampling_info(events_seen): { "developer_mode": True, "license_key": "**NOT A LICENSE KEY**", - "event_harvest_config.harvest_limits.error_event_data": reservoir_size, + "error_collector.max_event_samples_stored": reservoir_size, }, ) def _test(): @@ -646,7 +654,7 @@ def test_serverless_mode_adaptive_sampling(time_to_next_reset, computed_count, c assert app.adaptive_sampler.computed_count_last == computed_count_last -@function_not_called("newrelic.core.adaptive_sampler", "AdaptiveSampler._reset") +@validate_function_not_called("newrelic.core.adaptive_sampler", "AdaptiveSampler._reset") @override_generic_settings(settings, {"developer_mode": True}) def test_compute_sampled_no_reset(): app = Application("Python Agent Test (Harvest Loop)") @@ -675,7 +683,7 @@ def transactions_validator(payload): settings, { "developer_mode": True, - "event_harvest_config.harvest_limits.analytic_event_data": transactions_limit, + "transaction_events.max_samples_stored": transactions_limit, "agent_limits.synthetics_events": synthetics_limit, }, ) @@ -858,22 +866,6 @@ def test_default_events_harvested(allowlist_event): assert app._stats_engine.metrics_count() == 4 -@failing_endpoint("analytic_event_data") -@override_generic_settings(settings, {"developer_mode": True, "agent_limits.merge_stats_maximum": 0}) -def test_infinite_merges(): - app = Application("Python Agent Test (Harvest Loop)") - 
app.connect_to_data_collector(None) - - app._stats_engine.transaction_events.add("transaction event") - - assert app._stats_engine.transaction_events.num_seen == 1 - - app.harvest() - - # the agent_limits.merge_stats_maximum is not respected - assert app._stats_engine.transaction_events.num_seen == 1 - - @failing_endpoint("analytic_event_data") @override_generic_settings(settings, {"developer_mode": True}) def test_flexible_harvest_rollback(): diff --git a/tests/agent_unittests/test_http_client.py b/tests/agent_unittests/test_http_client.py index 31d573cb89..ad1bffc858 100644 --- a/tests/agent_unittests/test_http_client.py +++ b/tests/agent_unittests/test_http_client.py @@ -319,7 +319,7 @@ def test_http_payload_compression(server, client_cls, method, threshold): def test_cert_path(server): with HttpClient("localhost", server.port, ca_bundle_path=CERT_PATH) as client: - status, data = client.send_request() + client.send_request() @pytest.mark.parametrize("system_certs_available", (True, False)) diff --git a/tests/component_djangorestframework/test_application.py b/tests/component_djangorestframework/test_application.py index b929b849d4..8cef68ec56 100644 --- a/tests/component_djangorestframework/test_application.py +++ b/tests/component_djangorestframework/test_application.py @@ -15,8 +15,9 @@ import django import pytest import webtest -from testing_support.fixtures import function_not_called, override_generic_settings +from testing_support.fixtures import override_generic_settings from testing_support.validators.validate_code_level_metrics import validate_code_level_metrics +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_errors import validate_transaction_errors from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -164,7 +165,7 @@ def test_application_view_agent_disabled(target_application): settings = 
global_settings() @override_generic_settings(settings, {"enabled": False}) - @function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") + @validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def _test(): response = target_application.get("/view/") assert response.status_int == 200 diff --git a/tests/component_graphqlserver/test_graphql.py b/tests/component_graphqlserver/test_graphql.py index 3e8391ed2a..41b28b9883 100644 --- a/tests/component_graphqlserver/test_graphql.py +++ b/tests/component_graphqlserver/test_graphql.py @@ -181,7 +181,7 @@ def _query(): @dt_enabled def test_middleware(target_application): - framework, version, target_application = target_application + framework, _version, target_application = target_application _test_middleware_metrics = [ ("GraphQL/operation/GraphQLServer/query//hello", 1), ("GraphQL/resolve/GraphQLServer/hello", 1), @@ -207,7 +207,7 @@ def _test(): @dt_enabled def test_exception_in_middleware(target_application): - framework, version, target_application = target_application + _framework, _version, target_application = target_application query = "query MyQuery { error_middleware }" field = "error_middleware" @@ -254,7 +254,7 @@ def _test(): @pytest.mark.parametrize("field", ("error", "error_non_null")) @dt_enabled def test_exception_in_resolver(target_application, field): - framework, version, target_application = target_application + _framework, _version, target_application = target_application query = f"query MyQuery {{ {field} }}" txn_name = "framework_graphql._target_schema_sync:resolve_error" @@ -308,7 +308,7 @@ def _test(): ], ) def test_exception_in_validation(target_application, is_graphql_2, query, exc_class): - framework, version, target_application = target_application + _framework, _version, target_application = target_application if "syntax" in query: txn_name = "graphql.language.parser:parse" else: @@ -354,7 +354,7 @@ def _test(): @dt_enabled def 
test_operation_metrics_and_attrs(target_application): - framework, version, target_application = target_application + framework, _version, target_application = target_application operation_metrics = [("GraphQL/operation/GraphQLServer/query/MyQuery/library", 1)] operation_attrs = {"graphql.operation.type": "query", "graphql.operation.name": "MyQuery"} @@ -380,7 +380,7 @@ def _test(): @dt_enabled def test_field_resolver_metrics_and_attrs(target_application): - framework, version, target_application = target_application + framework, _version, target_application = target_application field_resolver_metrics = [("GraphQL/resolve/GraphQLServer/hello", 1)] graphql_attrs = { "graphql.field.name": "hello", @@ -426,7 +426,7 @@ def _test(): @dt_enabled @pytest.mark.parametrize("query,obfuscated", _test_queries) def test_query_obfuscation(target_application, query, obfuscated): - framework, version, target_application = target_application + _framework, _version, target_application = target_application graphql_attrs = {"graphql.operation.query": obfuscated} if callable(query): @@ -471,7 +471,7 @@ def _test(): @dt_enabled @pytest.mark.parametrize("query,expected_path", _test_queries) def test_deepest_unique_path(target_application, query, expected_path): - framework, version, target_application = target_application + _framework, _version, target_application = target_application if expected_path == "/error": txn_name = "framework_graphql._target_schema_sync:resolve_error" else: @@ -486,5 +486,5 @@ def _test(): @validate_transaction_count(0) def test_ignored_introspection_transactions(target_application): - framework, version, target_application = target_application + _framework, _version, target_application = target_application response = target_application("{ __schema { types { name } } }") diff --git a/tests/coroutines_asyncio/test_context_propagation.py b/tests/coroutines_asyncio/test_context_propagation.py index a2773c0110..752e30448e 100644 --- 
a/tests/coroutines_asyncio/test_context_propagation.py +++ b/tests/coroutines_asyncio/test_context_propagation.py @@ -15,7 +15,8 @@ import sys import pytest -from testing_support.fixtures import function_not_called, override_generic_settings +from testing_support.fixtures import override_generic_settings +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.application import application_instance as application @@ -126,7 +127,7 @@ def handle_exception(loop, context): @override_generic_settings(global_settings(), {"enabled": False}) -@function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") +@validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def test_nr_disabled(event_loop): import asyncio diff --git a/tests/cross_agent/test_distributed_tracing.py b/tests/cross_agent/test_distributed_tracing.py index 3c7314b31d..2d4ca1ed72 100644 --- a/tests/cross_agent/test_distributed_tracing.py +++ b/tests/cross_agent/test_distributed_tracing.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import base64 import json from pathlib import Path @@ -118,13 +119,22 @@ def target_wsgi_application(environ, start_response): extra_inbound_payloads = test_settings["extra_inbound_payloads"] for payload, expected_result in extra_inbound_payloads: - result = txn.accept_distributed_trace_payload(payload, test_settings["transport_type"]) + headers = {"newrelic": payload} + result = txn.accept_distributed_trace_headers(headers, test_settings["transport_type"]) assert result is expected_result outbound_payloads = test_settings["outbound_payloads"] if outbound_payloads: for payload_assertions in outbound_payloads: - payload = txn._create_distributed_trace_payload() + headers = [] + txn.insert_distributed_trace_headers(headers) + # To revert to the dict format of the payload, use this: + payload = json.loads( + base64.b64decode([value for key, value in headers if key == "newrelic"][0]).decode("utf-8") + ) + payload_version = payload.get("v") + if payload_version and isinstance(payload_version, list): + payload["v"] = tuple(payload_version) assert_payload(payload, payload_assertions, test_settings["major_version"], test_settings["minor_version"]) start_response(status, response_headers) @@ -153,15 +163,16 @@ def test_distributed_tracing( web_transaction, ): extra_inbound_payloads = [] - if transport_type != "HTTP": - # Since wsgi_application calls accept_distributed_trace_payload + if not inbound_payloads: + # If there is no `inbound_payloads`, we do + # not want to break the downstream logic, + # so this is explicitly skipped. + pass + elif transport_type != "HTTP": + # Since wsgi_application calls accept_distributed_trace_headers # automatically with transport_type='HTTP', we must defer this call # until we can specify the transport type. extra_inbound_payloads.append((inbound_payloads.pop(), True)) - elif not inbound_payloads: - # In order to assert that accept_distributed_trace_payload returns - # False in this instance, we defer. 
- extra_inbound_payloads.append((inbound_payloads, False)) elif len(inbound_payloads) > 1: extra_inbound_payloads.extend((payload, False) for payload in inbound_payloads[1:]) diff --git a/tests/cross_agent/test_lambda_event_source.py b/tests/cross_agent/test_lambda_event_source.py deleted file mode 100644 index 325a920f6c..0000000000 --- a/tests/cross_agent/test_lambda_event_source.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2010 New Relic, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import json -from pathlib import Path - -import pytest -from testing_support.fixtures import override_application_settings -from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes - -from newrelic.api.lambda_handler import lambda_handler - -FIXTURE_DIR = Path(__file__).parent / "fixtures" -FIXTURE = FIXTURE_DIR / "lambda_event_source.json" -tests = {} -events = {} - - -def _load_tests(): - with FIXTURE.open(encoding="utf-8") as fh: - for test in json.loads(fh.read()): - test_name = test.pop("name") - - test_file = f"{test_name}.json" - path = FIXTURE_DIR / "lambda_event_source" / test_file - with path.open(encoding="utf-8") as fh: - events[test_name] = json.loads(fh.read()) - - tests[test_name] = test - return tests.keys() - - -class Context: - aws_request_id = "cookies" - invoked_function_arn = "arn" - function_name = "cats" - function_version = "$LATEST" - memory_limit_in_mb = 128 - - -@lambda_handler() -def handler(event, context): - return {"statusCode": "200", "body": "{}", "headers": {"Content-Type": "application/json", "Content-Length": 2}} - - -# The lambda_hander has been deprecated for 3+ years -@pytest.mark.skip(reason="The lambda_handler has been deprecated") -@pytest.mark.parametrize("test_name", _load_tests()) -def test_lambda_event_source(test_name): - _exact = {"user": {}, "intrinsic": {}, "agent": {}} - - expected_arn = tests[test_name].get("aws.lambda.eventSource.arn", None) - if expected_arn: - _exact["agent"]["aws.lambda.eventSource.arn"] = expected_arn - else: - pytest.skip("Nothing to test!") - return - - @override_application_settings({"attributes.include": ["aws.*"]}) - @validate_transaction_event_attributes({}, exact_attrs=_exact) - def _test(): - handler(events[test_name], Context) - - _test() diff --git a/tests/datastore_elasticsearch/test_async_elasticsearch.py b/tests/datastore_elasticsearch/test_async_elasticsearch.py index 5002d71f57..6c830c60a2 100644 --- 
a/tests/datastore_elasticsearch/test_async_elasticsearch.py +++ b/tests/datastore_elasticsearch/test_async_elasticsearch.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from conftest import ES_SETTINGS, IS_V8_OR_ABOVE +from conftest import ES_SETTINGS, IS_V8_OR_ABOVE, RUN_IF_V8_OR_ABOVE from elasticsearch._async import client from testing_support.fixture.event_loop import event_loop as loop from testing_support.fixtures import override_application_settings @@ -20,6 +20,7 @@ from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task +from newrelic.api.transaction import current_transaction # Settings @@ -193,5 +194,41 @@ def test_async_elasticsearch_operation_enabled(async_client, loop): loop.run_until_complete(_exercise_es(async_client)) +@validate_transaction_errors(errors=[]) +@validate_transaction_metrics( + "test_async_elasticsearch:test_async_elasticsearch_operation_enabled_empty_transaction_settings", + scoped_metrics=_enable_scoped_metrics, + rollup_metrics=_enable_rollup_metrics, + background_task=True, +) +@override_application_settings(_enable_instance_settings) +@background_task() +def test_async_elasticsearch_operation_enabled_empty_transaction_settings(async_client, loop): + transaction = current_transaction() + settings = transaction._settings + transaction._settings = None + + loop.run_until_complete(_exercise_es(async_client)) + + transaction._settings = settings + + def test_async_elasticsearch_no_transaction(async_client, loop): loop.run_until_complete(_exercise_es(async_client)) + + +@RUN_IF_V8_OR_ABOVE +@background_task() +def test_async_elasticsearch_options_no_crash(async_client, loop): + """Test that the options method on the async client doesn't cause a crash when run with the agent""" + + async def 
_test(): + client_with_auth = async_client.options(basic_auth=("username", "password")) + assert client_with_auth is not None + assert client_with_auth != async_client + + # If options was instrumented, this would cause a crash since the first call would return an unexpected coroutine + client_chained = async_client.options(basic_auth=("user", "pass")).options(request_timeout=60) + assert client_chained is not None + + loop.run_until_complete(_test()) diff --git a/tests/datastore_elasticsearch/test_async_instrumented_methods.py b/tests/datastore_elasticsearch/test_async_instrumented_methods.py index c6c771ba39..73141fc9f7 100644 --- a/tests/datastore_elasticsearch/test_async_instrumented_methods.py +++ b/tests/datastore_elasticsearch/test_async_instrumented_methods.py @@ -75,7 +75,7 @@ async def _test(): def _test_methods_wrapped(_object, ignored_methods=None): if not ignored_methods: - ignored_methods = {"perform_request", "transport"} + ignored_methods = {"perform_request", "transport", "options"} def is_wrapped(m): return hasattr(getattr(_object, m), "__wrapped__") diff --git a/tests/datastore_elasticsearch/test_elasticsearch.py b/tests/datastore_elasticsearch/test_elasticsearch.py index 685ea341a4..cf6b3a16c3 100644 --- a/tests/datastore_elasticsearch/test_elasticsearch.py +++ b/tests/datastore_elasticsearch/test_elasticsearch.py @@ -20,6 +20,7 @@ from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from newrelic.api.background_task import background_task +from newrelic.api.transaction import current_transaction # Settings @@ -189,5 +190,24 @@ def test_elasticsearch_operation_enabled(client): _exercise_es(client) +@validate_transaction_errors(errors=[]) +@validate_transaction_metrics( + "test_elasticsearch:test_elasticsearch_operation_enabled_empty_transaction_settings", + scoped_metrics=_enable_scoped_metrics, + rollup_metrics=_enable_rollup_metrics, + background_task=True, +) 
+@override_application_settings(_enable_instance_settings) +@background_task() +def test_elasticsearch_operation_enabled_empty_transaction_settings(client): + transaction = current_transaction() + settings = transaction._settings + transaction._settings = None + + _exercise_es(client) + + transaction._settings = settings + + def test_elasticsearch_no_transaction(client): _exercise_es(client) diff --git a/tests/datastore_elasticsearch/test_instrumented_methods.py b/tests/datastore_elasticsearch/test_instrumented_methods.py index facfbfe733..9d69f5d8dd 100644 --- a/tests/datastore_elasticsearch/test_instrumented_methods.py +++ b/tests/datastore_elasticsearch/test_instrumented_methods.py @@ -84,7 +84,7 @@ def _test(): def _test_methods_wrapped(_object, ignored_methods=None): if not ignored_methods: - ignored_methods = {"perform_request", "transport"} + ignored_methods = {"perform_request", "transport", "options"} def is_wrapped(m): return hasattr(getattr(_object, m), "__wrapped__") diff --git a/tests/datastore_psycopg/conftest.py b/tests/datastore_psycopg/conftest.py index 04894bfd1d..6f16ec8646 100644 --- a/tests/datastore_psycopg/conftest.py +++ b/tests/datastore_psycopg/conftest.py @@ -17,6 +17,8 @@ from testing_support.fixture.event_loop import event_loop as loop from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture +from newrelic.common.package_version_utils import get_package_version_tuple + _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. 
"transaction_tracer.explain_threshold": 0.0, @@ -36,6 +38,7 @@ DB_MULTIPLE_SETTINGS = postgresql_settings() DB_SETTINGS = DB_MULTIPLE_SETTINGS[0] +PSYCOPG_VERSION = get_package_version_tuple("psycopg") @pytest.fixture(scope="session", params=["sync", "async"]) diff --git a/tests/datastore_psycopg/test_as_string.py b/tests/datastore_psycopg/test_as_string.py index cea5627b6f..acd4750199 100644 --- a/tests/datastore_psycopg/test_as_string.py +++ b/tests/datastore_psycopg/test_as_string.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest +from conftest import PSYCOPG_VERSION + try: from psycopg import sql except ImportError: @@ -109,3 +112,12 @@ def test_as_string_10(connection): ) result = q2.as_string(connection) assert result == 'insert into table ("foo", "bar", "baz") values (%(foo)s, %(bar)s, %(baz)s)' + + +@pytest.mark.skipif(PSYCOPG_VERSION < (3, 2, 0), reason="This signature was changed in psycopg 3.2.0") +@background_task() +def test_as_string_11(connection): + ident = sql.Identifier("foo") + # No context provided to as_string(), should not raise an error + result = ident.as_string() + assert result == '"foo"' diff --git a/tests/datastore_psycopg/test_slow_sql.py b/tests/datastore_psycopg/test_slow_sql.py index 9fc0f04bdd..dbf2383325 100644 --- a/tests/datastore_psycopg/test_slow_sql.py +++ b/tests/datastore_psycopg/test_slow_sql.py @@ -122,7 +122,7 @@ def _test(): "tx": "8703ff3d88eefe9d", }, } - - transaction.accept_distributed_trace_payload(payload) + headers = {"newrelic": payload} + transaction.accept_distributed_trace_headers(headers) _test() diff --git a/tests/datastore_psycopg2/test_slow_sql.py b/tests/datastore_psycopg2/test_slow_sql.py index b41b0a6759..b85c1d1385 100644 --- a/tests/datastore_psycopg2/test_slow_sql.py +++ b/tests/datastore_psycopg2/test_slow_sql.py @@ -132,6 +132,7 @@ def _test(): }, } - transaction.accept_distributed_trace_payload(payload) + 
headers = {"newrelic": payload} + transaction.accept_distributed_trace_headers(headers) _test() diff --git a/tests/external_aiobotocore/conftest.py b/tests/external_aiobotocore/conftest.py index 3c35ffbf0b..6516585c88 100644 --- a/tests/external_aiobotocore/conftest.py +++ b/tests/external_aiobotocore/conftest.py @@ -20,7 +20,8 @@ import moto.server import pytest import werkzeug.serving -from external_botocore._mock_external_bedrock_server import MockExternalBedrockServer +from external_botocore._mock_external_bedrock_server_converse import MockExternalBedrockConverseServer +from external_botocore._mock_external_bedrock_server_invoke_model import MockExternalBedrockServer from testing_support.fixture.event_loop import event_loop as loop from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture @@ -183,3 +184,37 @@ def bedrock_server(loop): yield client loop.run_until_complete(client.__aexit__(None, None, None)) + + +# Bedrock Fixtures +@pytest.fixture(scope="session") +def bedrock_converse_server(loop): + """ + This fixture will create a mocked backend for testing purposes. 
+ """ + import aiobotocore + + from newrelic.core.config import _environ_as_bool + + if get_package_version_tuple("botocore") < (1, 31, 57): + pytest.skip(reason="Bedrock Runtime not available.") + + if _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): + raise NotImplementedError("To record test responses, use botocore instead.") + + # Use mocked Bedrock backend and prerecorded responses + with MockExternalBedrockConverseServer() as server: + session = aiobotocore.session.get_session() + client = loop.run_until_complete( + session.create_client( + "bedrock-runtime", + "us-east-1", + endpoint_url=f"http://localhost:{server.port}", + aws_access_key_id="NOT-A-REAL-SECRET", + aws_secret_access_key="NOT-A-REAL-SECRET", + ).__aenter__() + ) + + yield client + + loop.run_until_complete(client.__aexit__(None, None, None)) diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py b/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py new file mode 100644 index 0000000000..da9c5818e7 --- /dev/null +++ b/tests/external_aiobotocore/test_bedrock_chat_completion_converse.py @@ -0,0 +1,521 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import botocore.exceptions +import pytest +from conftest import BOTOCORE_VERSION +from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes +from testing_support.ml_testing_utils import ( + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_sans_content, + events_sans_llm_metadata, + events_with_context_attrs, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_event import validate_custom_event_count +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +chat_completion_expected_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "max_tokens", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": 
"c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "To convert 212°F to Celsius, we can use the formula:\n\nC = (F - 32) × 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in Fahrenheit\n\nPlugging in 212°F, we get:\n\nC = (212 - 32) × 5/9\nC = 180 × 5/9\nC = 100\n\nTherefore, 212°", # noqa: RUF001 + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), +] + + +@pytest.fixture(scope="module") +def exercise_model(loop, bedrock_converse_server): + def _exercise_model(message): + async def coro(): + inference_config = {"temperature": 0.7, "maxTokens": 100} + + response = await bedrock_converse_server.converse( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + system=[{"text": "You are a scientist."}], + inferenceConfig=inference_config, + ) + assert 
response + + return loop.run_until_complete(coro()) + + return _exercise_model + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_in_txn_with_llm_metadata(set_trace_info, exercise_model): + @validate_custom_events(events_with_context_attrs(chat_completion_expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_with_llm_metadata", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_in_txn_with_llm_metadata") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + with WithLlmCustomAttributes({"context": "attr"}): + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model): + @validate_custom_events(events_sans_content(chat_completion_expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_no_content", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + 
@background_task(name="test_bedrock_chat_completion_no_content") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model): + @validate_custom_events(add_token_count_to_events(chat_completion_expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_with_token_count", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_with_token_count") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model): + @validate_custom_events(events_sans_llm_metadata(chat_completion_expected_events)) + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_no_llm_metadata", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + 
rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_in_txn_no_llm_metadata") + def _test(): + set_trace_info() + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_bedrock_chat_completion_outside_txn(exercise_model): + add_custom_attribute("llm.conversation_id", "my-awesome-id") + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task(name="test_bedrock_chat_completion_disabled_ai_monitoring_settings") +def test_bedrock_chat_completion_disabled_ai_monitoring_settings(set_trace_info, exercise_model): + set_trace_info() + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + +chat_completion_invalid_access_key_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": 
"my-awesome-id", + "llm.foo": "bar", + "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + +_client_error = botocore.exceptions.ClientError +_client_error_name = callable_name(_client_error) + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_incorrect_access_key( + loop, monkeypatch, bedrock_converse_server, exercise_model, set_trace_info +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer + events. The error response can also be parsed, and will be included as attributes on the recorded exception. + """ + + @validate_custom_events(chat_completion_invalid_access_key_error_events) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + converse_incorrect_access_key(loop, bedrock_converse_server, monkeypatch) + + _test() + + +def 
converse_incorrect_access_key(loop, bedrock_converse_server, monkeypatch): + async def _coro(): + monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): + message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] + response = await bedrock_converse_server.converse( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + inferenceConfig={"temperature": 0.7, "maxTokens": 100}, + ) + assert response + + loop.run_until_complete(_coro()) + + +chat_completion_invalid_model_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "response.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "response.model": "does-not-exist", + "sequence": 0, + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_invalid_model(loop, bedrock_converse_server, set_trace_info): + @validate_custom_events(chat_completion_invalid_model_error_events) + @validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 400, + "error.message": 
"The provided model identifier is invalid.", + "error.code": "ValidationException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_error_invalid_model", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_error_invalid_model") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + converse_invalid_model(loop, bedrock_converse_server) + + _test() + + +def converse_invalid_model(loop, bedrock_converse_server): + async def _coro(): + with pytest.raises(_client_error): + message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] + + response = await bedrock_converse_server.converse( + modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} + ) + + assert response + + loop.run_until_complete(_coro()) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +def test_bedrock_chat_completion_error_invalid_model_no_content(loop, bedrock_converse_server, set_trace_info): + @validate_custom_events(events_sans_content(chat_completion_invalid_model_error_events)) + @validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_error_invalid_model_no_content", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + 
custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_error_invalid_model_no_content") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + converse_invalid_model(loop, bedrock_converse_server) + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_error_incorrect_access_key_with_token_count( + monkeypatch, bedrock_converse_server, loop, set_trace_info +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer + events. The error response can also be parsed, and will be included as attributes on the recorded exception. 
+ """ + + @validate_custom_events(add_token_count_to_events(chat_completion_invalid_access_key_error_events)) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_incorrect_access_key_with_token_count", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_incorrect_access_key_with_token_count") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + converse_incorrect_access_key(loop, bedrock_converse_server, monkeypatch) + + _test() diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion.py b/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py similarity index 100% rename from tests/external_aiobotocore/test_bedrock_chat_completion.py rename to tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py diff --git a/tests/external_botocore/_mock_external_bedrock_server_converse.py b/tests/external_botocore/_mock_external_bedrock_server_converse.py new file mode 100644 index 0000000000..aef6d52856 --- /dev/null +++ b/tests/external_botocore/_mock_external_bedrock_server_converse.py @@ -0,0 +1,120 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from testing_support.mock_external_http_server import MockExternalHTTPServer + +RESPONSES = { + "What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "c20d345e-6878-4778-b674-6b187bae8ecf"}, + 200, + { + "metrics": {"latencyMs": 1866}, + "output": { + "message": { + "content": [ + { + "text": "To convert 212°F to Celsius, we can use the formula:\n\nC = (F - 32) × 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in Fahrenheit\n\nPlugging in 212°F, we get:\n\nC = (212 - 32) × 5/9\nC = 180 × 5/9\nC = 100\n\nTherefore, 212°" # noqa: RUF001 + } + ], + "role": "assistant", + } + }, + "stopReason": "max_tokens", + "usage": {"inputTokens": 26, "outputTokens": 100, "totalTokens": 126}, + }, + ], + "Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "e1206e19-2318-4a9d-be98-017c73f06118", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], + "Model does not exist.": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "f4908827-3db9-4742-9103-2bbc34578b03", + "x-amzn-ErrorType": "ValidationException:http://internal.amazon.com/coral/com.amazon.bedrock/", + }, + 400, + {"message": "The provided model identifier is invalid."}, + ], +} + + +def simple_get(self): + content_len = int(self.headers.get("content-length")) + body = self.rfile.read(content_len).decode("utf-8") + try: + 
content = json.loads(body) + except Exception: + content = body + + prompt = extract_shortened_prompt_converse(content) + if not prompt: + self.send_response(500) + self.end_headers() + self.wfile.write(b"Could not parse prompt.") + return + + headers, status_code, response = ({}, 0, "") + + for k, v in RESPONSES.items(): + if prompt.startswith(k): + headers, status_code, response = v + break + + if not response: + # If no matches found + self.send_response(500) + self.end_headers() + self.wfile.write(f"Unknown Prompt:\n{prompt}".encode()) + return + + # Send response code + self.send_response(status_code) + + # Send headers + for k, v in headers.items(): + self.send_header(k, v) + self.end_headers() + + # Send response body + response_body = json.dumps(response).encode("utf-8") + + self.wfile.write(response_body) + return + + +def extract_shortened_prompt_converse(content): + try: + prompt = content["messages"][0].get("content")[0].get("text", None) + # Sometimes there are leading whitespaces in the prompt. + prompt = prompt.lstrip().split("\n")[0] + except Exception: + prompt = "" + return prompt + + +class MockExternalBedrockConverseServer(MockExternalHTTPServer): + # To use this class in a test one needs to start and stop this server + # before and after making requests to the test app that makes the external + # calls. 
+ + def __init__(self, handler=simple_get, port=None, *args, **kwargs): + super().__init__(handler=handler, port=port, *args, **kwargs) # noqa: B026 diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server_invoke_model.py similarity index 100% rename from tests/external_botocore/_mock_external_bedrock_server.py rename to tests/external_botocore/_mock_external_bedrock_server_invoke_model.py diff --git a/tests/external_botocore/conftest.py b/tests/external_botocore/conftest.py index b5e6b7b329..17abcaca93 100644 --- a/tests/external_botocore/conftest.py +++ b/tests/external_botocore/conftest.py @@ -19,13 +19,10 @@ from pathlib import Path import pytest -from _mock_external_bedrock_server import MockExternalBedrockServer, extract_shortened_prompt +from _mock_external_bedrock_server_converse import MockExternalBedrockConverseServer, extract_shortened_prompt_converse +from _mock_external_bedrock_server_invoke_model import MockExternalBedrockServer, extract_shortened_prompt from botocore.response import StreamingBody -from testing_support.fixtures import ( - collector_agent_registration_fixture, - collector_available_fixture, - override_application_settings, -) +from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture from newrelic.common.object_wrapper import wrap_function_wrapper from newrelic.common.package_version_utils import get_package_version, get_package_version_tuple @@ -34,7 +31,7 @@ BOTOCORE_VERSION = get_package_version("botocore") _default_settings = { - "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. + "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slowdowns. 
"transaction_tracer.explain_threshold": 0.0, "transaction_tracer.transaction_threshold": 0.0, "transaction_tracer.stack_trace_threshold": 0.0, @@ -54,6 +51,9 @@ BEDROCK_AUDIT_LOG_FILE = Path(__file__).parent / "bedrock_audit.log" BEDROCK_AUDIT_LOG_CONTENTS = {} +BEDROCK_CONVERSE_AUDIT_LOG_FILE = Path(__file__).parent / "bedrock_audit_converse.log" +BEDROCK_CONVERSE_AUDIT_LOG_CONTENTS = {} + @pytest.fixture(scope="session") def bedrock_server(): @@ -99,7 +99,59 @@ def bedrock_server(): # Write responses to audit log bedrock_audit_log_contents = dict(sorted(BEDROCK_AUDIT_LOG_CONTENTS.items(), key=lambda i: (i[1][1], i[0]))) - with BEDROCK_AUDIT_LOG_FILE.open("w") as audit_log_fp: + with BEDROCK_AUDIT_LOG_FILE.open("w", encoding="utf-8") as audit_log_fp: + json.dump(bedrock_audit_log_contents, fp=audit_log_fp, indent=4) + + +@pytest.fixture(scope="session") +def bedrock_converse_server(): + """ + This fixture will either create a mocked backend for testing purposes, or will + set up an audit log file to log responses of the real Bedrock backend to a file. + The behavior can be controlled by setting NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES=1 as + an environment variable to run using the real Bedrock backend. (Default: mocking) + """ + import boto3 + + from newrelic.core.config import _environ_as_bool + + if get_package_version_tuple("botocore") < (1, 31, 57): + pytest.skip(reason="Bedrock Runtime not available.") + + if not _environ_as_bool("NEW_RELIC_TESTING_RECORD_BEDROCK_RESPONSES", False): + # Use mocked Bedrock backend and prerecorded responses + with MockExternalBedrockConverseServer() as server: + client = boto3.client( + "bedrock-runtime", + "us-east-1", + endpoint_url=f"http://localhost:{server.port}", + aws_access_key_id="NOT-A-REAL-SECRET", + aws_secret_access_key="NOT-A-REAL-SECRET", + ) + + yield client + else: + # Use real Bedrock backend and record responses + assert os.environ["AWS_ACCESS_KEY_ID"], "AWS_ACCESS_KEY_ID is required." 
+ assert os.environ["AWS_SECRET_ACCESS_KEY"], "AWS_SECRET_ACCESS_KEY is required." + + # Construct real client + client = boto3.client("bedrock-runtime", "us-east-1") + + # Apply function wrappers to record data + wrap_function_wrapper( + "botocore.endpoint", "Endpoint._do_get_response", wrap_botocore_endpoint_Endpoint__do_get_response_converse + ) + wrap_function_wrapper( + "botocore.eventstream", "EventStreamBuffer.add_data", wrap_botocore_eventstream_add_data_converse + ) + yield client # Run tests + + # Write responses to audit log + bedrock_audit_log_contents = dict( + sorted(BEDROCK_CONVERSE_AUDIT_LOG_CONTENTS.items(), key=lambda i: (i[1][1], i[0])) + ) + with BEDROCK_CONVERSE_AUDIT_LOG_FILE.open("w", encoding="utf-8") as audit_log_fp: json.dump(bedrock_audit_log_contents, fp=audit_log_fp, indent=4) @@ -153,6 +205,45 @@ def wrap_botocore_endpoint_Endpoint__do_get_response(wrapped, instance, args, kw return result +def wrap_botocore_endpoint_Endpoint__do_get_response_converse(wrapped, instance, args, kwargs): + request = bind__do_get_response(*args, **kwargs) + + if not request: + return wrapped(*args, **kwargs) + + # Send request + result = wrapped(*args, **kwargs) + # Unpack response + success, exception = result + response = (success or exception)[0] + + body = request.body + + try: + content = json.loads(body) + except Exception: + content = body.decode("utf-8") + + prompt = extract_shortened_prompt_converse(content) + headers = dict(response.headers.items()) + headers = dict( + filter(lambda k: k[0].lower() in RECORDED_HEADERS or k[0].startswith("x-ratelimit"), headers.items()) + ) + status_code = response.status_code + + # Log response + if response.raw.chunked: + # Log response + BEDROCK_CONVERSE_AUDIT_LOG_CONTENTS[prompt] = headers, status_code, [] # Append response data to audit log + else: + # Clean up data + response_content = response.content + data = json.loads(response_content.decode("utf-8")) + BEDROCK_CONVERSE_AUDIT_LOG_CONTENTS[prompt] = 
headers, status_code, data # Append response data to audit log + + return result + + def bind__do_get_response(request, operation_model, context): return request @@ -163,3 +254,11 @@ def wrap_botocore_eventstream_add_data(wrapped, instance, args, kwargs): prompt = list(BEDROCK_AUDIT_LOG_CONTENTS.keys())[-1] BEDROCK_AUDIT_LOG_CONTENTS[prompt][2].append(data) return wrapped(*args, **kwargs) + + +def wrap_botocore_eventstream_add_data_converse(wrapped, instance, args, kwargs): + bound_args = bind_args(wrapped, args, kwargs) + data = bound_args["data"].hex() # convert bytes to hex for storage + prompt = list(BEDROCK_CONVERSE_AUDIT_LOG_CONTENTS.keys())[-1] + BEDROCK_CONVERSE_AUDIT_LOG_CONTENTS[prompt][2].append(data) + return wrapped(*args, **kwargs) diff --git a/tests/external_botocore/test_bedrock_chat_completion.py b/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py similarity index 100% rename from tests/external_botocore/test_bedrock_chat_completion.py rename to tests/external_botocore/test_bedrock_chat_completion_invoke_model.py diff --git a/tests/external_botocore/test_chat_completion_converse.py b/tests/external_botocore/test_chat_completion_converse.py new file mode 100644 index 0000000000..96ead41dd7 --- /dev/null +++ b/tests/external_botocore/test_chat_completion_converse.py @@ -0,0 +1,524 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import botocore.exceptions +import pytest +from conftest import BOTOCORE_VERSION +from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes +from testing_support.ml_testing_utils import ( + add_token_count_to_events, + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_sans_content, + events_sans_llm_metadata, + events_with_context_attrs, + llm_token_count_callback, + set_trace_info, +) +from testing_support.validators.validate_custom_event import validate_custom_event_count +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes +from newrelic.api.transaction import add_custom_attribute +from newrelic.common.object_names import callable_name + +chat_completion_expected_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "max_tokens", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 3, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": 
"c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "You are a scientist.", + "role": "system", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", + "span_id": None, + "trace_id": "trace-id", + "content": "To convert 212°F to Celsius, we can use the formula:\n\nC = (F - 32) × 5/9\n\nWhere:\nC is the temperature in Celsius\nF is the temperature in Fahrenheit\n\nPlugging in 212°F, we get:\n\nC = (212 - 32) × 5/9\nC = 180 × 5/9\nC = 100\n\nTherefore, 212°", # noqa: RUF001 + "role": "assistant", + "completion_id": None, + "sequence": 2, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), +] + + +@pytest.fixture(scope="module") +def exercise_model(bedrock_converse_server): + def _exercise_model(message): + inference_config = {"temperature": 0.7, "maxTokens": 100} + + response = bedrock_converse_server.converse( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + system=[{"text": "You are a scientist."}], + inferenceConfig=inference_config, + ) + + return _exercise_model + + 
+@reset_core_stats_engine() +def test_bedrock_chat_completion_in_txn_with_llm_metadata(set_trace_info, exercise_model): + @validate_custom_events(events_with_context_attrs(chat_completion_expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_with_llm_metadata", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_in_txn_with_llm_metadata") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + with WithLlmCustomAttributes({"context": "attr"}): + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@disabled_ai_monitoring_record_content_settings +@reset_core_stats_engine() +def test_bedrock_chat_completion_no_content(set_trace_info, exercise_model): + @validate_custom_events(events_sans_content(chat_completion_expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_no_content", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_no_content") + def _test(): + set_trace_info() + 
add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model): + @validate_custom_events(add_token_count_to_events(chat_completion_expected_events)) + # One summary event, one user message, and one response message from the assistant + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_with_token_count", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @background_task(name="test_bedrock_chat_completion_with_token_count") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_no_llm_metadata(set_trace_info, exercise_model): + @validate_custom_events(events_sans_llm_metadata(chat_completion_expected_events)) + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_in_txn_no_llm_metadata", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], 
+ background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_in_txn_no_llm_metadata") + def _test(): + set_trace_info() + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + _test() + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_bedrock_chat_completion_outside_txn(exercise_model): + add_custom_attribute("llm.conversation_id", "my-awesome-id") + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task(name="test_bedrock_chat_completion_disabled_ai_monitoring_settings") +def test_bedrock_chat_completion_disabled_ai_monitoring_settings(set_trace_info, exercise_model): + set_trace_info() + message = [{"role": "user", "content": [{"text": "What is 212 degrees Fahrenheit converted to Celsius?"}]}] + exercise_model(message) + + +chat_completion_invalid_access_key_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", + "span_id": None, + "trace_id": "trace-id", + 
"content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + +_client_error = botocore.exceptions.ClientError +_client_error_name = callable_name(_client_error) + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_incorrect_access_key( + monkeypatch, bedrock_converse_server, exercise_model, set_trace_info +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer + events. The error response can also be parsed, and will be included as attributes on the recorded exception. + """ + + @validate_custom_events(chat_completion_invalid_access_key_error_events) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion") + def _test(): + monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + message = [{"role": "user", "content": [{"text": "Invalid Token"}]}] + + response = 
bedrock_converse_server.converse( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + inferenceConfig={"temperature": 0.7, "maxTokens": 100}, + ) + + assert response + + _test() + + +chat_completion_invalid_model_error_events = [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "span_id": None, + "trace_id": "trace-id", + "duration": None, # Response time varies each test run + "request.model": "does-not-exist", + "response.model": "does-not-exist", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.number_of_messages": 1, + "vendor": "bedrock", + "ingest_source": "Python", + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", + "content": "Model does not exist.", + "role": "user", + "completion_id": None, + "response.model": "does-not-exist", + "sequence": 0, + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), +] + + +@reset_core_stats_engine() +def test_bedrock_chat_completion_error_invalid_model(bedrock_converse_server, set_trace_info): + @validate_custom_events(events_with_context_attrs(chat_completion_invalid_model_error_events)) + @validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_error_invalid_model", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + 
custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_error_invalid_model") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + with pytest.raises(_client_error): + with WithLlmCustomAttributes({"context": "attr"}): + message = [{"role": "user", "content": [{"text": "Model does not exist."}]}] + + response = bedrock_converse_server.converse( + modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} + ) + + assert response + + _test() + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +def test_bedrock_chat_completion_error_invalid_model_no_content(bedrock_converse_server, set_trace_info): + @validate_custom_events(events_sans_content(chat_completion_invalid_model_error_events)) + @validate_error_trace_attributes( + "botocore.errorfactory:ValidationException", + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 400, + "error.message": "The provided model identifier is invalid.", + "error.code": "ValidationException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_error_invalid_model_no_content", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_error_invalid_model_no_content") + def _test(): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + with pytest.raises(_client_error): + message = [{"role": "user", "content": [{"text": 
"Model does not exist."}]}] + + response = bedrock_converse_server.converse( + modelId="does-not-exist", messages=message, inferenceConfig={"temperature": 0.7, "maxTokens": 100} + ) + + assert response + + _test() + + +@reset_core_stats_engine() +@override_llm_token_callback_settings(llm_token_count_callback) +def test_bedrock_chat_completion_error_incorrect_access_key_with_token_count( + monkeypatch, bedrock_converse_server, exercise_model, set_trace_info +): + """ + A request is made to the server with invalid credentials. botocore will reach out to the server and receive an + UnrecognizedClientException as a response. Information from the request will be parsed and reported in customer + events. The error response can also be parsed, and will be included as attributes on the recorded exception. + """ + + @validate_custom_events(add_token_count_to_events(chat_completion_invalid_access_key_error_events)) + @validate_error_trace_attributes( + _client_error_name, + exact_attrs={ + "agent": {}, + "intrinsic": {}, + "user": { + "http.statusCode": 403, + "error.message": "The security token included in the request is invalid.", + "error.code": "UnrecognizedClientException", + }, + }, + ) + @validate_transaction_metrics( + name="test_bedrock_chat_completion_incorrect_access_key_with_token_count", + scoped_metrics=[("Llm/completion/Bedrock/converse", 1)], + rollup_metrics=[("Llm/completion/Bedrock/converse", 1)], + custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)], + background_task=True, + ) + @background_task(name="test_bedrock_chat_completion_incorrect_access_key_with_token_count") + def _test(): + monkeypatch.setattr(bedrock_converse_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY") + + with pytest.raises(_client_error): + set_trace_info() + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + add_custom_attribute("non_llm_attr", "python-agent") + + message = 
[{"role": "user", "content": [{"text": "Invalid Token"}]}] + + response = bedrock_converse_server.converse( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + messages=message, + inferenceConfig={"temperature": 0.7, "maxTokens": 100}, + ) + + assert response + + _test() diff --git a/tests/external_httplib/test_urllib.py b/tests/external_httplib/test_urllib.py index 8d9dd1820d..9b2fde3500 100644 --- a/tests/external_httplib/test_urllib.py +++ b/tests/external_httplib/test_urllib.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os +import sys import pytest try: import urllib.request as urllib -except: +except ImportError: import urllib from testing_support.external_fixtures import cache_outgoing_headers, insert_incoming_headers @@ -29,6 +29,14 @@ from newrelic.api.background_task import background_task +# Since Python 3.3, `urllib.URLopener()` has been deprecated in favor of +# `urllib.request.urlopen`. In Python 3.14, `urllib.URLopener()` will be +# removed. 
`urllib.request.urlopen` corresponds to the old `urllib2.urlopen` + +SKIP_IF_PYTHON_3_14_OR_ABOVE = pytest.mark.skipif( + sys.version_info[0:2] >= (3, 14), reason="urllib.URLopener() is removed in Python 3.14 and above" +) + @pytest.fixture(scope="session") def metrics(server): @@ -44,6 +52,7 @@ def metrics(server): return scoped, rollup +@SKIP_IF_PYTHON_3_14_OR_ABOVE def test_urlopener_http_request(server, metrics): @validate_transaction_metrics( "test_urllib:test_urlopener_http_request", @@ -59,6 +68,7 @@ def _test(): _test() +@SKIP_IF_PYTHON_3_14_OR_ABOVE def test_urlopener_https_request(server, metrics): @validate_transaction_metrics( "test_urllib:test_urlopener_https_request", @@ -77,6 +87,7 @@ def _test(): _test() +@SKIP_IF_PYTHON_3_14_OR_ABOVE def test_urlopener_http_request_with_port(server): scoped = [(f"External/localhost:{server.port}/urllib/", 1)] @@ -110,6 +121,7 @@ def _test(): ] +@SKIP_IF_PYTHON_3_14_OR_ABOVE @validate_transaction_metrics( "test_urllib:test_urlopener_file_request", scoped_metrics=_test_urlopener_file_request_scoped_metrics, @@ -123,6 +135,7 @@ def test_urlopener_file_request(): opener.open(file_uri) +@SKIP_IF_PYTHON_3_14_OR_ABOVE @background_task() @cache_outgoing_headers @validate_cross_process_headers @@ -131,6 +144,7 @@ def test_urlopener_cross_process_request(server): opener.open(f"http://localhost:{server.port}/") +@SKIP_IF_PYTHON_3_14_OR_ABOVE @cat_enabled def test_urlopener_cross_process_response(server): _test_urlopener_cross_process_response_scoped_metrics = [ diff --git a/tests/external_httplib/test_urllib2.py b/tests/external_httplib/test_urllib2.py index 54aeed7217..c744614be8 100644 --- a/tests/external_httplib/test_urllib2.py +++ b/tests/external_httplib/test_urllib2.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os import urllib.request as urllib2 import pytest diff --git a/tests/external_httplib2/test_httplib2.py b/tests/external_httplib2/test_httplib2.py index 19edf44028..e2ffd9f46a 100644 --- a/tests/external_httplib2/test_httplib2.py +++ b/tests/external_httplib2/test_httplib2.py @@ -84,7 +84,7 @@ def test_httplib2_http_request(server, metrics): @background_task(name="test_httplib2:test_httplib2_http_request") def _test(): connection = httplib2.Http() - response, content = connection.request(f"http://localhost:{server.port}", "GET") + connection.request(f"http://localhost:{server.port}", "GET") _test() diff --git a/tests/external_pyzeebe/_mocks.py b/tests/external_pyzeebe/_mocks.py new file mode 100644 index 0000000000..91ce47946a --- /dev/null +++ b/tests/external_pyzeebe/_mocks.py @@ -0,0 +1,106 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from types import SimpleNamespace + +from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter + +# Dummy response objects with only required fields +DummyCreateProcessInstanceResponse = SimpleNamespace(process_instance_key=12345) + +DummyCreateProcessInstanceWithResultResponse = SimpleNamespace( + process_instance_key=45678, variables={"result": "success"} +) + +DummyDeployResourceResponse = SimpleNamespace(key=67890, deployments=[], tenant_id=None) + +DummyPublishMessageResponse = SimpleNamespace(key=99999, tenant_id=None) + + +# Dummy RPC stub coroutines +async def dummy_create_process_instance( + self, + bpmn_process_id: str, + variables: dict = None, # noqa: RUF013 + version: int = -1, + tenant_id: str = None, # noqa: RUF013 +): + """Simulate ZeebeAdapter.create_process_instance""" + return DummyCreateProcessInstanceResponse + + +async def dummy_create_process_instance_with_result( + self, + bpmn_process_id: str, + variables: dict = None, # noqa: RUF013 + version: int = -1, + timeout: int = 0, + variables_to_fetch=None, + tenant_id: str = None, # noqa: RUF013 +): + """Simulate ZeebeAdapter.create_process_instance_with_result""" + return DummyCreateProcessInstanceWithResultResponse + + +async def dummy_deploy_resource(*resource_file_path: str, tenant_id: str = None): # noqa: RUF013 + """Simulate ZeebeAdapter.deploy_resource""" + # Create dummy deployment metadata for each provided resource path + deployments = [ + SimpleNamespace( + resource_name=str(path), + bpmn_process_id="dummy_process", + process_definition_key=123, + version=1, + tenant_id=tenant_id if tenant_id is not None else None, + ) + for path in resource_file_path + ] + # Create a dummy response with a list of deployments + return SimpleNamespace( + deployment_key=333333, deployments=deployments, tenant_id=tenant_id if tenant_id is not None else None + ) + + +async def dummy_publish_message( + self, + name: str, + correlation_key: str, + variables: dict = None, # noqa: RUF013 + 
 time_to_live_in_milliseconds: int = 60000, + message_id: str = None, # noqa: RUF013 + tenant_id: str = None, # noqa: RUF013 +): + """Simulate ZeebeAdapter.publish_message""" + # Return the dummy response (contains message key) + return SimpleNamespace(key=999999, tenant_id=tenant_id if tenant_id is not None else None) + + +async def dummy_complete_job(self, job_key: int, variables: dict): + """Simulate JobExecutor.complete_job""" + self._last_complete = {"job_key": job_key, "variables": variables} + return None + + +class DummyZeebeAdapter(ZeebeAdapter): + """Simulate a ZeebeAdapter so JobExecutor can be instantiated w/o gRPC channel""" + + def __init__(self): + self.completed_job_key = None + self.completed_job_vars = None + + async def complete_job(self, job_key: int, variables: dict): + self.completed_job_key = job_key + self.completed_job_vars = variables + return None diff --git a/tests/external_pyzeebe/conftest.py b/tests/external_pyzeebe/conftest.py new file mode 100644 index 0000000000..35d6bc5700 --- /dev/null +++ b/tests/external_pyzeebe/conftest.py @@ -0,0 +1,29 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ "transaction_tracer.explain_threshold": 0.0, + "transaction_tracer.transaction_threshold": 0.0, + "transaction_tracer.stack_trace_threshold": 0.0, + "debug.log_data_collector_payloads": True, + "debug.record_transaction_failure": True, +} + +collector_agent_registration = collector_agent_registration_fixture( + app_name="Python Agent Test (external_pyzeebe)", default_settings=_default_settings +) diff --git a/tests/external_pyzeebe/test.bpmn b/tests/external_pyzeebe/test.bpmn new file mode 100644 index 0000000000..7cdf1e410d --- /dev/null +++ b/tests/external_pyzeebe/test.bpmn @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/external_pyzeebe/test_client.py b/tests/external_pyzeebe/test_client.py new file mode 100644 index 0000000000..a832f20f44 --- /dev/null +++ b/tests/external_pyzeebe/test_client.py @@ -0,0 +1,144 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from _mocks import ( + dummy_create_process_instance, + dummy_create_process_instance_with_result, + dummy_deploy_resource, + dummy_publish_message, +) +from pyzeebe import ZeebeClient, create_insecure_channel +from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter +from testing_support.validators.validate_custom_event import validate_custom_event_count +from testing_support.validators.validate_span_events import validate_span_events +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +from newrelic.api.background_task import background_task + +client = ZeebeClient(create_insecure_channel()) + + +@validate_transaction_metrics( + "test_zeebe_client:run_process", rollup_metrics=[("ZeebeClient/run_process", 1)], background_task=True +) +@validate_span_events(exact_agents={"zeebe.client.bpmnProcessId": "test_process"}, count=1) +def test_run_process(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "create_process_instance", dummy_create_process_instance) + + @background_task(name="test_zeebe_client:run_process") + async def _test(): + response = await client.run_process("test_process") + assert response.process_instance_key == 12345 + + loop.run_until_complete(_test()) + + +@validate_custom_event_count(count=0) +def test_run_process_outside_txn(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "create_process_instance", dummy_create_process_instance) + + async def _test(): + response = await client.run_process("test_process") + assert response.process_instance_key == 12345 + + loop.run_until_complete(_test()) + + +@validate_transaction_metrics( + "test_zeebe_client:run_process_with_result", + rollup_metrics=[("ZeebeClient/run_process_with_result", 1)], + background_task=True, +) +@validate_span_events(exact_agents={"zeebe.client.bpmnProcessId": "test_process"}, count=1) +def test_run_process_with_result(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "create_process_instance_with_result", 
dummy_create_process_instance_with_result) + + @background_task(name="test_zeebe_client:run_process_with_result") + async def _test(): + result = await client.run_process_with_result("test_process") + assert result.process_instance_key == 45678 + assert result.variables == {"result": "success"} + + loop.run_until_complete(_test()) + + +@validate_custom_event_count(count=0) +def test_run_process_with_result_outside_txn(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "create_process_instance_with_result", dummy_create_process_instance_with_result) + + async def _test(): + result = await client.run_process_with_result("test_process") + assert result.process_instance_key == 45678 + assert result.variables == {"result": "success"} + + loop.run_until_complete(_test()) + + +@validate_transaction_metrics( + "test_zeebe_client:deploy_resource", rollup_metrics=[("ZeebeClient/deploy_resource", 1)], background_task=True +) +@validate_span_events(exact_agents={"zeebe.client.resourceCount": 1, "zeebe.client.resourceFile": "test.bpmn"}, count=1) +def test_deploy_resource(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "deploy_resource", dummy_deploy_resource) + + @background_task(name="test_zeebe_client:deploy_resource") + async def _test(): + result = await client.deploy_resource("test.bpmn") + assert result.deployment_key == 333333 + + loop.run_until_complete(_test()) + + +@validate_custom_event_count(count=0) +def test_deploy_resource_outside_txn(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "deploy_resource", dummy_deploy_resource) + + async def _test(): + result = await client.deploy_resource("test.bpmn") + assert result.deployment_key == 333333 + + loop.run_until_complete(_test()) + + +@validate_transaction_metrics( + "test_zeebe_client:publish_message", rollup_metrics=[("ZeebeClient/publish_message", 1)], background_task=True +) +@validate_span_events( + exact_agents={ + "zeebe.client.messageName": "test_message", + "zeebe.client.correlationKey": 
"999999", + "zeebe.client.messageId": "abc123", + }, + count=1, +) +def test_publish_message(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "publish_message", dummy_publish_message) + + @background_task(name="test_zeebe_client:publish_message") + async def _test(): + result = await client.publish_message(name="test_message", correlation_key="999999", message_id="abc123") + assert result.key == 999999 + + loop.run_until_complete(_test()) + + +@validate_custom_event_count(count=0) +def test_publish_message_outside_txn(monkeypatch, loop): + monkeypatch.setattr(ZeebeAdapter, "publish_message", dummy_publish_message) + + async def _test(): + result = await client.publish_message(name="test_message", correlation_key="999999", message_id="abc123") + assert result.key == 999999 + + loop.run_until_complete(_test()) diff --git a/tests/external_pyzeebe/test_job_executor.py b/tests/external_pyzeebe/test_job_executor.py new file mode 100644 index 0000000000..ca6e47a9a1 --- /dev/null +++ b/tests/external_pyzeebe/test_job_executor.py @@ -0,0 +1,79 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio + +from _mocks import DummyZeebeAdapter +from pyzeebe import Job, JobStatus, ZeebeTaskRouter +from pyzeebe.worker.job_executor import JobController, JobExecutor +from pyzeebe.worker.task_state import TaskState +from testing_support.validators.validate_custom_parameters import validate_custom_parameters +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +# Set up a router with a dummy async task +router = ZeebeTaskRouter() + + +@router.task(task_type="testTask") +async def dummy_task(x: int) -> dict: + """ + Simulate a task function that reads input variable x + """ + return {"result": x} + + +@validate_transaction_metrics(group="ZeebeTask", name="test_process/testTask") +@validate_custom_parameters( + required_params=[ + ("zeebe.job.key", 123), + ("zeebe.job.type", "testTask"), + ("zeebe.job.bpmnProcessId", "test_process"), + ("zeebe.job.processInstanceKey", 456), + ("zeebe.job.elementId", "service_task_123"), + ] +) +def test_execute_one_job(loop): + dummy_adapter = DummyZeebeAdapter() + + # Build a Job with fixed values + job = Job( + key=123, + type="testTask", # must match router.task(task_type="testTask") + bpmn_process_id="test_process", + process_instance_key=456, + process_definition_version=1, + process_definition_key=789, + element_id="service_task_123", + element_instance_key=321, + custom_headers={}, + worker="test_worker", + retries=3, + deadline=0, + variables={"x": 33}, + status=JobStatus.Running, + ) + + # JobExecutor constructor params init + task_obj = router.get_task("testTask") + assert task_obj is not None + jobs_queue = asyncio.Queue() + task_state = TaskState() + + job_executor = JobExecutor(task_obj, jobs_queue, task_state, dummy_adapter) + + # Build a JobController for completion logic. 
+ job_controller = JobController(job, dummy_adapter) + + loop.run_until_complete(job_executor.execute_one_job(job, job_controller)) + assert job.variables["x"] == 33 diff --git a/tests/framework_aiohttp/test_ws.py b/tests/framework_aiohttp/test_ws.py index da908014dc..df0c820e4c 100644 --- a/tests/framework_aiohttp/test_ws.py +++ b/tests/framework_aiohttp/test_ws.py @@ -13,12 +13,12 @@ # limitations under the License. import aiohttp -from testing_support.fixtures import function_not_called +from testing_support.validators.validate_function_not_called import validate_function_not_called version_info = tuple(int(_) for _ in aiohttp.__version__.split(".")[:2]) -@function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") +@validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def test_websocket(aiohttp_app): async def ws_write(): ws = await aiohttp_app.client.ws_connect("/ws") diff --git a/tests/framework_flask/conftest.py b/tests/framework_flask/conftest.py index 872de5b53c..f2881d0c75 100644 --- a/tests/framework_flask/conftest.py +++ b/tests/framework_flask/conftest.py @@ -15,18 +15,11 @@ import platform import pytest -from flask import __version__ as flask_version # required for python 3.7 in lieu of get_package_version_tuple +from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture from newrelic.common.package_version_utils import get_package_version_tuple -try: - FLASK_VERSION = tuple(int(v) for v in flask_version.split(".")) -except: - # This does not work for Python 3.7 for v2.2.5 - # This only works for flaskmaster - FLASK_VERSION = get_package_version_tuple("flask") - -from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture +FLASK_VERSION = get_package_version_tuple("flask") _default_settings = { "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slow downs. 
diff --git a/tests/framework_graphql/test_application.py b/tests/framework_graphql/test_application.py index 7564dddaf2..bedec18fab 100644 --- a/tests/framework_graphql/test_application.py +++ b/tests/framework_graphql/test_application.py @@ -22,6 +22,7 @@ from framework_graphql.test_application_async import error_middleware_async, example_middleware_async from newrelic.api.background_task import background_task +from newrelic.api.transaction import current_transaction from newrelic.common.object_names import callable_name from newrelic.common.package_version_utils import get_package_version @@ -60,7 +61,7 @@ def error_middleware(next, root, info, **args): # noqa: A002 def test_no_harm_no_transaction(target_application): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + _framework, _version, target_application, _is_bg, _schema_type, _extra_spans = target_application def _test(): response = target_application("{ __schema { types { name } } }") @@ -94,7 +95,7 @@ def _graphql_base_rollup_metrics(framework, version, background_task=True): def test_basic(target_application): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, _schema_type, _extra_spans = target_application @validate_transaction_metrics( "query//hello", @@ -110,9 +111,32 @@ def _test(): _test() +def test_transaction_empty_settings(target_application): + framework, version, target_application, _is_bg, _schema_type, _extra_spans = target_application + + @validate_transaction_metrics( + "query//hello", + "GraphQL", + rollup_metrics=_graphql_base_rollup_metrics(framework, version, True), + background_task=True, + ) + @background_task() + def _test(): + transaction = current_transaction() + settings = transaction._settings + transaction._settings = None + + response = target_application("{ hello }") + assert response["hello"] == "Hello!" 
+ + transaction._settings = settings + + _test() + + @dt_enabled def test_query_and_mutation(target_application): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, schema_type, _extra_spans = target_application mutation_path = "storage_add" if framework != "Graphene" else "storage_add.string" type_annotation = "!" if framework == "Strawberry" else "" @@ -221,7 +245,7 @@ def _test(): @pytest.mark.parametrize("middleware", error_middleware) @dt_enabled def test_exception_in_middleware(target_application, middleware): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, schema_type, _extra_spans = target_application query = "query MyQuery { error_middleware }" field = "error_middleware" @@ -276,7 +300,7 @@ def _test(): @pytest.mark.parametrize("field", ("error", "error_non_null")) @dt_enabled def test_exception_in_resolver(target_application, field): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, schema_type, _extra_spans = target_application query = f"query MyQuery {{ {field} }}" txn_name = f"framework_{framework.lower()}._target_schema_{schema_type}:resolve_error" @@ -332,7 +356,7 @@ def _test(): ], ) def test_exception_in_validation(target_application, query, exc_class): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, _schema_type, _extra_spans = target_application if "syntax" in query: txn_name = "graphql.language.parser:parse" else: @@ -377,7 +401,7 @@ def _test(): @dt_enabled def test_operation_metrics_and_attrs(target_application): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, _schema_type, 
extra_spans = target_application operation_metrics = [(f"GraphQL/operation/{framework}/query/MyQuery/library", 1)] operation_attrs = {"graphql.operation.type": "query", "graphql.operation.name": "MyQuery"} @@ -404,7 +428,7 @@ def _test(): @dt_enabled def test_field_resolver_metrics_and_attrs(target_application): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, version, target_application, is_bg, _schema_type, extra_spans = target_application field_resolver_metrics = [(f"GraphQL/resolve/{framework}/hello", 1)] type_annotation = "!" if framework == "Strawberry" else "" @@ -454,7 +478,7 @@ def _test(): @dt_enabled @pytest.mark.parametrize("query,obfuscated", _test_queries) def test_query_obfuscation(target_application, query, obfuscated): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, _version, target_application, is_bg, _schema_type, _extra_spans = target_application graphql_attrs = {"graphql.operation.query": obfuscated} if callable(query): @@ -502,7 +526,7 @@ def _test(): @dt_enabled @pytest.mark.parametrize("query,expected_path", _test_queries) def test_deepest_unique_path(target_application, query, expected_path): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + framework, _version, target_application, is_bg, schema_type, _extra_spans = target_application if expected_path == "/error": txn_name = f"framework_{framework.lower()}._target_schema_{schema_type}:resolve_error" else: @@ -518,7 +542,7 @@ def _test(): @pytest.mark.parametrize("capture_introspection_setting", (True, False)) def test_introspection_transactions(target_application, capture_introspection_setting): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + _framework, _version, target_application, _is_bg, _schema_type, _extra_spans = target_application txn_ct = 1 if 
capture_introspection_setting else 0 @override_application_settings( diff --git a/tests/framework_grpc/test_server.py b/tests/framework_grpc/test_server.py index 311cedc652..602d2ccb45 100644 --- a/tests/framework_grpc/test_server.py +++ b/tests/framework_grpc/test_server.py @@ -16,8 +16,9 @@ import pytest from _test_common import create_request, wait_for_transaction_completion from conftest import create_stub_and_channel -from testing_support.fixtures import function_not_called, override_application_settings, override_generic_settings +from testing_support.fixtures import override_application_settings, override_generic_settings from testing_support.validators.validate_code_level_metrics import validate_code_level_metrics +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_errors import validate_transaction_errors from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -173,7 +174,7 @@ def test_newrelic_disabled_no_transaction(mock_grpc_server, stub): method = stub.DoUnaryUnary @override_generic_settings(global_settings(), {"enabled": False}) - @function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") + @validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") @wait_for_transaction_completion def _doit(): method(request) diff --git a/tests/framework_sanic/test_application.py b/tests/framework_sanic/test_application.py index 75f795b3da..04df329c39 100644 --- a/tests/framework_sanic/test_application.py +++ b/tests/framework_sanic/test_application.py @@ -17,12 +17,12 @@ import pytest import sanic from testing_support.fixtures import ( - function_not_called, override_application_settings, override_generic_settings, override_ignore_status_codes, ) from 
testing_support.validators.validate_code_level_metrics import validate_code_level_metrics +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_errors import validate_transaction_errors from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics @@ -60,7 +60,7 @@ def test_simple_request(app): assert response.status == 200 -@function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") +@validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def test_websocket(app): headers = {"upgrade": "WebSocket"} response = app.fetch("get", "/", headers=headers) @@ -214,7 +214,7 @@ def _test(): _test = validate_transaction_errors(errors=errors)(_test) _test = validate_transaction_metrics(metric_name, scoped_metrics=metrics, rollup_metrics=metrics)(_test) else: - _test = function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction")(_test) + _test = validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction")(_test) _test() @@ -222,7 +222,7 @@ def _test(): def test_no_transaction_when_nr_disabled(app): settings = global_settings() - @function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") + @validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") @override_generic_settings(settings, {"enabled": False}) def _test(): app.fetch("GET", "/") diff --git a/tests/framework_starlette/test_application.py b/tests/framework_starlette/test_application.py index 55f751e9a3..cd5668fcb8 100644 --- a/tests/framework_starlette/test_application.py +++ b/tests/framework_starlette/test_application.py @@ -120,7 +120,7 @@ def test_exception_in_middleware(target_application, app_name): 
# Starlette >=0.15 and <0.17 raises an exception group instead of reraising the ValueError # This only occurs on Python versions >=3.8 - if sys.version_info[0:2] > (3, 7) and starlette_version >= (0, 15, 0) and starlette_version < (0, 17, 0): + if (0, 15, 0) <= starlette_version < (0, 17, 0): from anyio._backends._asyncio import ExceptionGroup exc_type = ExceptionGroup diff --git a/tests/framework_starlette/test_bg_tasks.py b/tests/framework_starlette/test_bg_tasks.py index 5c339e7a49..15b37eafb8 100644 --- a/tests/framework_starlette/test_bg_tasks.py +++ b/tests/framework_starlette/test_bg_tasks.py @@ -85,15 +85,9 @@ def _test(): assert response.status == 200 # The bug was fixed in version 0.21.0 but re-occured in 0.23.1. - # The bug was also not present on 0.20.1 to 0.23.1 if using Python3.7. + # The bug was also not present on 0.20.1 to 0.23.1 if using Python 3.7. # The bug was fixed again in version 0.29.0 - BUG_COMPLETELY_FIXED = any( - ( - (0, 21, 0) <= starlette_version < (0, 23, 1), - (0, 20, 1) <= starlette_version < (0, 23, 1) and sys.version_info[:2] > (3, 7), - starlette_version >= (0, 29, 0), - ) - ) + BUG_COMPLETELY_FIXED = any(((0, 20, 1) <= starlette_version < (0, 23, 1), starlette_version >= (0, 29, 0))) BUG_PARTIALLY_FIXED = any( ((0, 20, 1) <= starlette_version < (0, 21, 0), (0, 23, 1) <= starlette_version < (0, 29, 0)) ) diff --git a/tests/framework_strawberry/test_application.py b/tests/framework_strawberry/test_application.py index 403491ab20..7a522e22ad 100644 --- a/tests/framework_strawberry/test_application.py +++ b/tests/framework_strawberry/test_application.py @@ -37,7 +37,7 @@ def target_application(request): @pytest.mark.parametrize("capture_introspection_setting", (True, False)) def test_introspection_transactions(target_application, capture_introspection_setting): - framework, version, target_application, is_bg, schema_type, extra_spans = target_application + _framework, _version, target_application, _is_bg, _schema_type, _extra_spans 
= target_application txn_ct = 1 if capture_introspection_setting else 0 diff --git a/tests/framework_tornado/test_server.py b/tests/framework_tornado/test_server.py index 1d250ac48e..f5a750a9ec 100644 --- a/tests/framework_tornado/test_server.py +++ b/tests/framework_tornado/test_server.py @@ -14,12 +14,12 @@ import pytest from testing_support.fixtures import ( - function_not_called, override_application_settings, override_generic_settings, override_ignore_status_codes, ) from testing_support.validators.validate_code_level_metrics import validate_code_level_metrics +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_transaction_count import validate_transaction_count from testing_support.validators.validate_transaction_errors import validate_transaction_errors from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes @@ -166,7 +166,7 @@ def test_not_found(app): @override_generic_settings(global_settings(), {"enabled": False}) -@function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") +@validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def test_nr_disabled(app): response = app.fetch("/simple") assert response.code == 200 @@ -199,7 +199,7 @@ async def _connect(): def connect(): return app.io_loop.run_sync(_connect) - @function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") + @validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") def call(call): async def _call(): await conn.write_message("test") diff --git a/tests/logger_logging/conftest.py b/tests/logger_logging/conftest.py index 4b22012b5d..9ed7407a9b 100644 --- a/tests/logger_logging/conftest.py +++ b/tests/logger_logging/conftest.py @@ -31,7 +31,7 @@ "application_logging.forwarding.context_data.enabled": True, 
"application_logging.metrics.enabled": True, "application_logging.local_decorating.enabled": True, - "event_harvest_config.harvest_limits.log_event_data": 100000, + "application_logging.forwarding.max_samples_stored": 100000, } collector_agent_registration = collector_agent_registration_fixture( diff --git a/tests/logger_loguru/conftest.py b/tests/logger_loguru/conftest.py index a6387c7017..26578b9b33 100644 --- a/tests/logger_loguru/conftest.py +++ b/tests/logger_loguru/conftest.py @@ -29,7 +29,7 @@ "application_logging.metrics.enabled": True, "application_logging.local_decorating.enabled": True, "application_logging.forwarding.context_data.enabled": True, - "event_harvest_config.harvest_limits.log_event_data": 100000, + "application_logging.forwarding.max_samples_stored": 100000, } collector_agent_registration = collector_agent_registration_fixture( diff --git a/tests/logger_structlog/conftest.py b/tests/logger_structlog/conftest.py index 26b2cce367..bafdc2d7fd 100644 --- a/tests/logger_structlog/conftest.py +++ b/tests/logger_structlog/conftest.py @@ -30,7 +30,7 @@ "application_logging.metrics.enabled": True, "application_logging.local_decorating.enabled": True, "application_logging.forwarding.context_data.enabled": True, - "event_harvest_config.harvest_limits.log_event_data": 100000, + "application_logging.forwarding.max_samples_stored": 100000, } collector_agent_registration = collector_agent_registration_fixture( diff --git a/tests/messagebroker_pika/test_pika_async_connection_consume.py b/tests/messagebroker_pika/test_pika_async_connection_consume.py index 906741fb5b..4779fa1698 100644 --- a/tests/messagebroker_pika/test_pika_async_connection_consume.py +++ b/tests/messagebroker_pika/test_pika_async_connection_consume.py @@ -21,13 +21,9 @@ from conftest import BODY, CORRELATION_ID, EXCHANGE, EXCHANGE_2, HEADERS, PIKA_VERSION_INFO, QUEUE, QUEUE_2, REPLY_TO from pika.adapters.tornado_connection import TornadoConnection from testing_support.db_settings import 
rabbitmq_settings -from testing_support.fixtures import ( - capture_transaction_metrics, - dt_enabled, - function_not_called, - override_application_settings, -) +from testing_support.fixtures import capture_transaction_metrics, dt_enabled, override_application_settings from testing_support.validators.validate_code_level_metrics import validate_code_level_metrics +from testing_support.validators.validate_function_not_called import validate_function_not_called from testing_support.validators.validate_span_events import validate_span_events from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics from testing_support.validators.validate_tt_collector_json import validate_tt_collector_json @@ -364,7 +360,7 @@ def on_open_connection(connection): # This should not create a transaction -@function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") +@validate_function_not_called("newrelic.core.stats_engine", "StatsEngine.record_transaction") @override_application_settings({"debug.record_transaction_failure": True}) def test_tornado_connection_basic_consume_outside_transaction(producer): def on_message(channel, method_frame, header_frame, body): diff --git a/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py b/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py index 465c1758a4..72a210ad79 100644 --- a/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py +++ b/tests/messagebroker_pika/test_pika_blocking_connection_consume_generator.py @@ -93,7 +93,7 @@ def test_blocking_connection_consume_timeout(producer): for result in channel.consume(QUEUE, inactivity_timeout=0.01): # result is None if there is a timeout if result and any(result): - method_frame, properties, body = result + method_frame, _properties, body = result channel.basic_ack(method_frame.delivery_tag) assert hasattr(method_frame, "_nr_start_time") assert body == BODY @@ -202,7 
+202,7 @@ def test_blocking_connection_consume_using_methods(producer): consumer = channel.consume(QUEUE, inactivity_timeout=0.01) - method, properties, body = next(consumer) + method, _properties, body = next(consumer) assert hasattr(method, "_nr_start_time") assert body == BODY @@ -256,9 +256,9 @@ def test_blocking_connection_consume_many_outside_txn(produce_five): @validate_tt_collector_json(message_broker_params=_message_broker_tt_params) def consume_it(consumer, up_next=None): if up_next is None: - method_frame, properties, body = next(consumer) + method_frame, _properties, body = next(consumer) else: - method_frame, properties, body = up_next + method_frame, _properties, body = up_next assert hasattr(method_frame, "_nr_start_time") assert body == BODY return next(consumer) @@ -291,7 +291,7 @@ def test_blocking_connection_consume_using_methods_outside_txn(producer): consumer = channel.consume(QUEUE, inactivity_timeout=0.01) - method, properties, body = next(consumer) + method, _properties, body = next(consumer) assert hasattr(method, "_nr_start_time") assert body == BODY diff --git a/tests/mlmodel_autogen/conftest.py b/tests/mlmodel_autogen/conftest.py new file mode 100644 index 0000000000..2330f11da2 --- /dev/null +++ b/tests/mlmodel_autogen/conftest.py @@ -0,0 +1,166 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json + +import pytest +from autogen_core import FunctionCall +from autogen_core.models import CreateResult, RequestUsage +from autogen_ext.models.replay import ReplayChatCompletionClient +from testing_support.fixture.event_loop import event_loop as loop +from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture + +_default_settings = { + "package_reporting.enabled": False, # Turn off package reporting for testing as it causes slowdowns. + "transaction_tracer.explain_threshold": 0.0, + "transaction_tracer.transaction_threshold": 0.0, + "transaction_tracer.stack_trace_threshold": 0.0, + "debug.log_data_collector_payloads": True, + "debug.record_transaction_failure": True, + "ai_monitoring.enabled": True, +} + +collector_agent_registration = collector_agent_registration_fixture( + app_name="Python Agent Test (mlmodel_autogen)", default_settings=_default_settings +) + + +@pytest.fixture +def single_tool_model_client(): + model_client = ReplayChatCompletionClient( + [ + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="1", arguments=json.dumps({"message": "Hello"}), name="add_exclamation")], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + "Hello", + "TERMINATE", + ], + model_info={ + "function_calling": True, + "vision": True, + "json_output": True, + "family": "gpt-4.1-nano", + "structured_output": True, + }, + ) + return model_client + + +@pytest.fixture +def single_tool_model_client_error(): + model_client = ReplayChatCompletionClient( + [ + CreateResult( + finish_reason="function_calls", + # Set arguments to an invalid type to trigger error in tool + content=[FunctionCall(id="1", arguments=12, name="add_exclamation")], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + "Hello", + "TERMINATE", + ], + model_info={ + "function_calling": True, + "vision": True, + "json_output": True, + "family": "gpt-4.1-nano", + 
"structured_output": True, + }, + ) + return model_client + + +@pytest.fixture +def multi_tool_model_client(): + model_client = ReplayChatCompletionClient( + chat_completions=[ + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="1", name="add_exclamation", arguments=json.dumps({"message": "Hello"}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="3", name="compute_sum", arguments=json.dumps({"a": 5, "b": 3}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="2", name="add_exclamation", arguments=json.dumps({"message": "Goodbye"}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="4", name="compute_sum", arguments=json.dumps({"a": 123, "b": 2}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + ], + model_info={ + "family": "gpt-4.1-nano", + "function_calling": True, + "json_output": True, + "vision": True, + "structured_output": True, + }, + ) + return model_client + + +@pytest.fixture +def multi_tool_model_client_error(): + model_client = ReplayChatCompletionClient( + chat_completions=[ + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="1", name="add_exclamation", arguments=json.dumps({"message": "Hello"}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="2", name="add_exclamation", arguments=json.dumps({"message": "Goodbye"}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + CreateResult( + finish_reason="function_calls", + content=[FunctionCall(id="3", name="compute_sum", 
arguments=json.dumps({"a": 5, "b": 3}))], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + CreateResult( + finish_reason="function_calls", + # Set arguments to an invalid type to trigger error in tool + content=[FunctionCall(id="4", name="compute_sum", arguments=12)], + usage=RequestUsage(prompt_tokens=10, completion_tokens=5), + cached=False, + ), + ], + model_info={ + "family": "gpt-4.1-nano", + "function_calling": True, + "json_output": True, + "vision": True, + "structured_output": True, + }, + ) + return model_client diff --git a/tests/mlmodel_autogen/test_assistant_agent.py b/tests/mlmodel_autogen/test_assistant_agent.py new file mode 100644 index 0000000000..866b3b39df --- /dev/null +++ b/tests/mlmodel_autogen/test_assistant_agent.py @@ -0,0 +1,320 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import pytest +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.base import TaskResult +from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.ml_testing_utils import ( + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_with_context_attrs, + set_trace_info, + tool_events_sans_content, +) +from testing_support.validators.validate_custom_event import validate_custom_event_count +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes +from newrelic.common.object_names import callable_name +from newrelic.common.package_version_utils import get_package_version_tuple + +AUTOGEN_VERSION = get_package_version_tuple("autogen-agentchat") + +tool_recorded_event = [ + ( + {"type": "LlmTool"}, + { + "id": None, + "run_id": "1", + "output": "Hello!", + "name": "add_exclamation", + "agent_name": "pirate_agent", + "span_id": None, + "trace_id": "trace-id", + "input": '{"message": "Hello"}', + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ) +] + +tool_recorded_event_error = [ + ( + {"type": "LlmTool"}, + { + "id": None, + "run_id": "1", + "name": "add_exclamation", + "agent_name": "pirate_agent", + "span_id": None, + "trace_id": "trace-id", + "input": "12", + "vendor": "autogen", + "ingest_source": "Python", + "error": True, + "duration": None, + }, + ) +] + + +agent_recorded_event = [ + ( + {"type": "LlmAgent"}, + { + "id": None, + "name": "pirate_agent", 
+ "span_id": None, + "trace_id": "trace-id", + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ) +] + + +# Example tool for testing purposes +def add_exclamation(message: str) -> str: + return f"{message}!" + + +@reset_core_stats_engine() +@validate_custom_events( + events_with_context_attrs(tool_recorded_event) + events_with_context_attrs(agent_recorded_event) +) +@validate_custom_event_count(count=2) +@validate_transaction_metrics( + "test_assistant_agent:test_run_assistant_agent", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_assistant_agent(loop, set_trace_info, single_tool_model_client): + set_trace_info() + pirate_agent = AssistantAgent( + name="pirate_agent", model_client=single_tool_model_client, tools=[add_exclamation], model_client_stream=True + ) + + async def _test(): + with WithLlmCustomAttributes({"context": "attr"}): + response = await pirate_agent.run() + assert "Hello!" 
in response.messages[1].content[0].content + + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@validate_custom_events(tool_recorded_event + agent_recorded_event) +@validate_custom_event_count(count=2) +@validate_transaction_metrics( + "test_assistant_agent:test_run_stream_assistant_agent", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_stream_assistant_agent(loop, set_trace_info, single_tool_model_client): + set_trace_info() + + pirate_agent = AssistantAgent( + name="pirate_agent", model_client=single_tool_model_client, tools=[add_exclamation], model_client_stream=True + ) + + async def _test(): + response = pirate_agent.run_stream() + result = "" + async for message in response: + if not isinstance(message, TaskResult): + result += message.to_text() + else: + break + + assert "Hello!" 
in result + + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(tool_events_sans_content(tool_recorded_event) + agent_recorded_event) +@validate_custom_event_count(count=2) +@validate_transaction_metrics( + "test_assistant_agent:test_run_assistant_agent_no_content", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_assistant_agent_no_content(loop, set_trace_info, single_tool_model_client): + set_trace_info() + pirate_agent = AssistantAgent( + name="pirate_agent", model_client=single_tool_model_client, tools=[add_exclamation], model_client_stream=True + ) + + async def _test(): + response = await pirate_agent.run() + assert "Hello!" in response.messages[1].content[0].content + + loop.run_until_complete(_test()) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_run_assistant_agent_disabled_ai_monitoring_events(loop, set_trace_info, single_tool_model_client): + set_trace_info() + pirate_agent = AssistantAgent( + name="pirate_agent", model_client=single_tool_model_client, tools=[add_exclamation], model_client_stream=True + ) + + async def _test(): + response = await pirate_agent.run() + assert "Hello!" 
in response.messages[1].content[0].content + + loop.run_until_complete(_test()) + + +SKIP_IF_AUTOGEN_062 = pytest.mark.skipif( + AUTOGEN_VERSION > (0, 6, 1), + reason="Forcing invalid tool call arguments causes a hang on autogen versions above 0.6.1", +) + + +@SKIP_IF_AUTOGEN_062 +@reset_core_stats_engine() +@validate_transaction_error_event_count(1) +@validate_error_trace_attributes(callable_name(TypeError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}}) +@validate_custom_events(tool_recorded_event_error) +@validate_custom_event_count(count=2) +@validate_transaction_metrics( + "test_assistant_agent:test_run_assistant_agent_error", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_assistant_agent_error(loop, set_trace_info, single_tool_model_client_error): + set_trace_info() + pirate_agent = AssistantAgent( + name="pirate_agent", + model_client=single_tool_model_client_error, + tools=[add_exclamation], + model_client_stream=False, + ) + + async def _test(): + with pytest.raises(TypeError): + await pirate_agent.run() + + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_run_assistant_agent_outside_txn(loop, single_tool_model_client): + pirate_agent = AssistantAgent( + name="pirate_agent", model_client=single_tool_model_client, tools=[add_exclamation], model_client_stream=True + ) + + async def 
_test(): + response = await pirate_agent.run() + assert "Hello!" in response.messages[1].content[0].content + + loop.run_until_complete(_test()) diff --git a/tests/mlmodel_autogen/test_mcp_tool_adapter.py b/tests/mlmodel_autogen/test_mcp_tool_adapter.py new file mode 100644 index 0000000000..15bf0bcec4 --- /dev/null +++ b/tests/mlmodel_autogen/test_mcp_tool_adapter.py @@ -0,0 +1,83 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest.mock import AsyncMock + +import pytest +from autogen_ext.tools.mcp import SseMcpToolAdapter, SseServerParams +from mcp import ClientSession, Tool +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +from newrelic.api.background_task import background_task + +# Test setup derived from: https://github.com/microsoft/autogen/blob/main/python/packages/autogen-ext/tests/tools/test_mcp_tools.py +# autogen MIT license: https://github.com/microsoft/autogen/blob/main/LICENSE and +# https://github.com/microsoft/autogen/blob/main/LICENSE-CODE + + +@pytest.fixture +def mock_sse_session(): + session = AsyncMock(spec=ClientSession) + session.initialize = AsyncMock() + session.call_tool = AsyncMock() + session.list_tools = AsyncMock() + return session + + +@pytest.fixture +def add_exclamation(): + return Tool( + name="add_exclamation", + description="A test SSE tool that adds an exclamation mark to a string", + inputSchema={"type": "object", "properties": 
{"input": {"type": "string"}}, "required": ["input"]}, + ) + + +@validate_transaction_metrics( + "test_mcp_tool_adapter:test_from_server_params_tracing", + scoped_metrics=[("Llm/autogen_ext.tools.mcp._sse:SseMcpToolAdapter.from_server_params/add_exclamation", 1)], + rollup_metrics=[("Llm/autogen_ext.tools.mcp._sse:SseMcpToolAdapter.from_server_params/add_exclamation", 1)], + background_task=True, +) +@background_task() +def test_from_server_params_tracing(loop, mock_sse_session, monkeypatch, add_exclamation): + async def _test(): + params = SseServerParams(url="http://test-url") + mock_context = AsyncMock() + mock_context.__aenter__.return_value = mock_sse_session + monkeypatch.setattr( + "autogen_ext.tools.mcp._base.create_mcp_server_session", lambda *args, **kwargs: mock_context + ) + + mock_sse_session.list_tools.return_value.tools = [add_exclamation] + + await SseMcpToolAdapter.from_server_params(params, "add_exclamation") + + loop.run_until_complete(_test()) + + +def test_from_server_params_tracing_no_transaction(loop, mock_sse_session, monkeypatch, add_exclamation): + async def _test(): + params = SseServerParams(url="http://test-url") + mock_context = AsyncMock() + mock_context.__aenter__.return_value = mock_sse_session + monkeypatch.setattr( + "autogen_ext.tools.mcp._base.create_mcp_server_session", lambda *args, **kwargs: mock_context + ) + + mock_sse_session.list_tools.return_value.tools = [add_exclamation] + + await SseMcpToolAdapter.from_server_params(params, "add_exclamation") + + loop.run_until_complete(_test()) diff --git a/tests/mlmodel_autogen/test_teams.py b/tests/mlmodel_autogen/test_teams.py new file mode 100644 index 0000000000..fa8b8ca3f6 --- /dev/null +++ b/tests/mlmodel_autogen/test_teams.py @@ -0,0 +1,511 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.base import TaskResult +from autogen_agentchat.teams import RoundRobinGroupChat +from test_assistant_agent import SKIP_IF_AUTOGEN_062 +from testing_support.fixtures import reset_core_stats_engine, validate_attributes +from testing_support.ml_testing_utils import ( + disabled_ai_monitoring_record_content_settings, + disabled_ai_monitoring_settings, + events_with_context_attrs, + set_trace_info, + tool_events_sans_content, +) +from testing_support.validators.validate_custom_event import validate_custom_event_count +from testing_support.validators.validate_custom_events import validate_custom_events +from testing_support.validators.validate_error_trace_attributes import validate_error_trace_attributes +from testing_support.validators.validate_transaction_error_event_count import validate_transaction_error_event_count +from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics + +from newrelic.api.background_task import background_task +from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes +from newrelic.common.object_names import callable_name + +team_tools_recorded_events = [ + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": "1", + "output": "Hello!", + "name": "add_exclamation", + "agent_name": "pirate_agent", + "span_id": None, + "trace_id": "trace-id", + "input": '{"message": "Hello"}', + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), + ( 
+ {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": "3", + "output": "8", + "name": "compute_sum", + "agent_name": "robot_agent", + "span_id": None, + "trace_id": "trace-id", + "input": '{"a": 5, "b": 3}', + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), +] + +team_agent_recorded_events = [ + ( + {"type": "LlmAgent"}, + { + "id": None, + "name": "pirate_agent", + "span_id": None, + "trace_id": "trace-id", + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), + ( + {"type": "LlmAgent"}, + { + "id": None, + "name": "robot_agent", + "span_id": None, + "trace_id": "trace-id", + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), +] + + +team_tools_recorded_events_error = [ + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": "1", + "output": "Hello!", + "name": "add_exclamation", + "agent_name": "pirate_agent", + "span_id": None, + "trace_id": "trace-id", + "input": '{"message": "Hello"}', + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": "2", + "output": "Goodbye!", + "name": "add_exclamation", + "agent_name": "robot_agent", + "span_id": None, + "trace_id": "trace-id", + "input": '{"message": "Goodbye"}', + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": "3", + "output": "8", + "name": "compute_sum", + "agent_name": "pirate_agent", + "span_id": None, + "trace_id": "trace-id", + "input": '{"a": 5, "b": 3}', + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + }, + ), + ( + {"type": "LlmTool"}, + { + "id": None, # UUID that varies with each run + "run_id": "4", + "name": "compute_sum", + "agent_name": "robot_agent", + "span_id": None, + "trace_id": 
"trace-id", + "input": "12", + "vendor": "autogen", + "ingest_source": "Python", + "duration": None, + "error": True, + }, + ), +] + + +# Example tool functions +def add_exclamation(message: str) -> str: + return f"{message}!" + + +def compute_sum(a: int, b: int) -> int: + return a + b + + +@reset_core_stats_engine() +@validate_custom_event_count(count=8) +@validate_transaction_metrics( + "test_teams:test_run_stream_round_robin_group", + # Expect two of each metric since there are two agents executing two different tools each across 4 turns + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 2, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 2, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 2, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 2), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 2, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 2, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 2), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 2, + ), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_stream_round_robin_group(loop, set_trace_info, multi_tool_model_client): + set_trace_info() + + pirate_agent = AssistantAgent( + name="pirate_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + robot_agent = AssistantAgent( + 
name="robot_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + agents = RoundRobinGroupChat(participants=[pirate_agent, robot_agent], max_turns=4) + + async def _test(): + response = agents.run_stream() + result = "" + async for message in response: + if not isinstance(message, TaskResult): + result += message.to_text() + else: + break + + assert "Hello!" in result + assert "Goodbye!" in result + assert "8" in result + assert "125" in result + + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=8) +@validate_transaction_metrics( + "test_teams:test_run_round_robin_group", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 2, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 2, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 2, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 2), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 2, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 2, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 2, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 2), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_round_robin_group(loop, set_trace_info, multi_tool_model_client): + set_trace_info() + + pirate_agent = AssistantAgent( + name="pirate_agent", + 
model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + robot_agent = AssistantAgent( + name="robot_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + agents = RoundRobinGroupChat(participants=[pirate_agent, robot_agent], max_turns=4) + + async def _test(): + with WithLlmCustomAttributes({"context": "attr"}): + response = await agents.run() + + assert "Hello!" in response.messages[2].content + assert "8" in response.messages[5].content + assert "Goodbye" in response.messages[8].content + assert "125" in response.messages[11].content + + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@disabled_ai_monitoring_record_content_settings +@validate_custom_events(tool_events_sans_content(team_tools_recorded_events) + team_agent_recorded_events) +@validate_custom_event_count(count=4) +@validate_transaction_metrics( + "test_teams:test_run_round_robin_group_no_content", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 1), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 1, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 1, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 1, + ), + 
("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 1), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_round_robin_group_no_content(loop, set_trace_info, multi_tool_model_client): + set_trace_info() + + pirate_agent = AssistantAgent( + name="pirate_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + robot_agent = AssistantAgent( + name="robot_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + agents = RoundRobinGroupChat(participants=[pirate_agent, robot_agent], max_turns=2) + + async def _test(): + response = await agents.run() + assert "Hello!" in response.messages[1].content[0].content + + loop.run_until_complete(_test()) + + +@disabled_ai_monitoring_settings +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +@background_task() +def test_run_round_robin_group_disabled_ai_events(loop, set_trace_info, multi_tool_model_client): + set_trace_info() + + pirate_agent = AssistantAgent( + name="pirate_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + robot_agent = AssistantAgent( + name="robot_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + agents = RoundRobinGroupChat(participants=[pirate_agent, robot_agent], max_turns=4) + + async def _test(): + response = await agents.run() + assert "Hello!" 
in response.messages[1].content[0].content + + loop.run_until_complete(_test()) + + +@SKIP_IF_AUTOGEN_062 +@reset_core_stats_engine() +@validate_transaction_error_event_count(1) +@validate_error_trace_attributes(callable_name(TypeError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}}) +@validate_custom_events(team_tools_recorded_events_error) +@validate_custom_event_count(count=8) +@validate_transaction_metrics( + "test_teams:test_run_round_robin_group_error", + scoped_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 2, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 2, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 2, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 2), + ], + rollup_metrics=[ + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/pirate_agent", + 2, + ), + ( + "Llm/agent/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent.on_messages_stream/robot_agent", + 2, + ), + ( + "Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/add_exclamation", + 2, + ), + ("Llm/tool/Autogen/autogen_agentchat.agents._assistant_agent:AssistantAgent._execute_tool_call/compute_sum", 2), + ], + background_task=True, +) +@validate_attributes("agent", ["llm"]) +@background_task() +def test_run_round_robin_group_error(loop, set_trace_info, multi_tool_model_client_error): + set_trace_info() + + pirate_agent = AssistantAgent( + name="pirate_agent", + model_client=multi_tool_model_client_error, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + robot_agent = AssistantAgent( + name="robot_agent", + model_client=multi_tool_model_client_error, + 
tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + agents = RoundRobinGroupChat(participants=[pirate_agent, robot_agent], max_turns=4) + + async def _test(): + # run() should result in a RuntimeError wrapping a TypeError + # Due to the async execution, the RuntimeError is what is raised despite the TypeError being the root cause + with pytest.raises(RuntimeError): + await agents.run() + + loop.run_until_complete(_test()) + + +@reset_core_stats_engine() +@validate_custom_event_count(count=0) +def test_run_round_robin_group_outside_txn(loop, multi_tool_model_client): + pirate_agent = AssistantAgent( + name="pirate_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + robot_agent = AssistantAgent( + name="robot_agent", + model_client=multi_tool_model_client, + tools=[add_exclamation, compute_sum], + model_client_stream=True, + ) + + agents = RoundRobinGroupChat(participants=[pirate_agent, robot_agent], max_turns=4) + + async def _test(): + response = await agents.run() + assert "Hello!" 
in response.messages[1].content[0].content + + loop.run_until_complete(_test()) diff --git a/tests/mlmodel_langchain/test_tool.py b/tests/mlmodel_langchain/test_tool.py index dcaebe7108..18882b87d1 100644 --- a/tests/mlmodel_langchain/test_tool.py +++ b/tests/mlmodel_langchain/test_tool.py @@ -27,6 +27,7 @@ disabled_ai_monitoring_settings, events_with_context_attrs, set_trace_info, + tool_events_sans_content, ) from testing_support.validators.validate_custom_event import validate_custom_event_count from testing_support.validators.validate_custom_events import validate_custom_events @@ -59,15 +60,6 @@ def _multi_arg_tool(first_num: int, second_num: int): return _multi_arg_tool -def events_sans_content(event): - new_event = copy.deepcopy(event) - for _event in new_event: - del _event[1]["input"] - if "output" in _event[1]: - del _event[1]["output"] - return new_event - - single_arg_tool_recorded_events = [ ( {"type": "LlmTool"}, @@ -108,7 +100,7 @@ def test_langchain_single_arg_tool(set_trace_info, single_arg_tool): @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings -@validate_custom_events(events_sans_content(single_arg_tool_recorded_events)) +@validate_custom_events(tool_events_sans_content(single_arg_tool_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_single_arg_tool_no_content", @@ -144,7 +136,7 @@ def test_langchain_single_arg_tool_async(set_trace_info, single_arg_tool, loop): @reset_core_stats_engine() @disabled_ai_monitoring_record_content_settings -@validate_custom_events(events_sans_content(single_arg_tool_recorded_events)) +@validate_custom_events(tool_events_sans_content(single_arg_tool_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_single_arg_tool_async_no_content", @@ -275,7 +267,7 @@ def test_langchain_error_in_run(set_trace_info, multi_arg_tool): @validate_error_trace_attributes( 
callable_name(pydantic_core._pydantic_core.ValidationError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}} ) -@validate_custom_events(events_sans_content(multi_arg_error_recorded_events)) +@validate_custom_events(tool_events_sans_content(multi_arg_error_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_error_in_run_no_content", @@ -327,7 +319,7 @@ def test_langchain_error_in_run_async(set_trace_info, multi_arg_tool, loop): @validate_error_trace_attributes( callable_name(pydantic_core._pydantic_core.ValidationError), exact_attrs={"agent": {}, "intrinsic": {}, "user": {}} ) -@validate_custom_events(events_sans_content(multi_arg_error_recorded_events)) +@validate_custom_events(tool_events_sans_content(multi_arg_error_recorded_events)) @validate_custom_event_count(count=1) @validate_transaction_metrics( name="test_tool:test_langchain_error_in_run_async_no_content", diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index b008675f99..ec3bda2028 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -744,7 +744,7 @@ def _extract_shortened_prompt(content): def get_openai_version(): - # Import OpenAI so that get package version can catpure the version from the + # Import OpenAI so that get_package_version() can capture the version from the # system module. OpenAI does not have a package version in v0. 
import openai diff --git a/tests/mlmodel_sklearn/test_calibration_models.py b/tests/mlmodel_sklearn/test_calibration_models.py index d7a06603bb..211b935adf 100644 --- a/tests/mlmodel_sklearn/test_calibration_models.py +++ b/tests/mlmodel_sklearn/test_calibration_models.py @@ -53,7 +53,7 @@ def _run(): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) clf = getattr(sklearn.calibration, calibration_model_name)() diff --git a/tests/mlmodel_sklearn/test_discriminant_analysis_models.py b/tests/mlmodel_sklearn/test_discriminant_analysis_models.py index 40d5be839b..f0b5a574f3 100644 --- a/tests/mlmodel_sklearn/test_discriminant_analysis_models.py +++ b/tests/mlmodel_sklearn/test_discriminant_analysis_models.py @@ -68,7 +68,7 @@ def _run(discriminant_analysis_model_name): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) kwargs = {} clf = getattr(sklearn.discriminant_analysis, discriminant_analysis_model_name)(**kwargs) diff --git a/tests/mlmodel_sklearn/test_gaussian_process_models.py b/tests/mlmodel_sklearn/test_gaussian_process_models.py index 761742fa47..49dd8fc7d3 100644 --- a/tests/mlmodel_sklearn/test_gaussian_process_models.py +++ b/tests/mlmodel_sklearn/test_gaussian_process_models.py @@ -53,7 +53,7 @@ def _run(gaussian_process_model_name): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) clf = getattr(sklearn.gaussian_process, 
gaussian_process_model_name)(random_state=0) diff --git a/tests/mlmodel_sklearn/test_multiclass_models.py b/tests/mlmodel_sklearn/test_multiclass_models.py index acc5e579f5..94b5c49ad5 100644 --- a/tests/mlmodel_sklearn/test_multiclass_models.py +++ b/tests/mlmodel_sklearn/test_multiclass_models.py @@ -58,7 +58,7 @@ def _run(multiclass_model_name): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) # This is an example of a model that has all the available attributes # We could have choosen any estimator that has predict, score, diff --git a/tests/mlmodel_sklearn/test_naive_bayes_models.py b/tests/mlmodel_sklearn/test_naive_bayes_models.py index 762d6db3a6..8e5f956c2c 100644 --- a/tests/mlmodel_sklearn/test_naive_bayes_models.py +++ b/tests/mlmodel_sklearn/test_naive_bayes_models.py @@ -97,7 +97,7 @@ def _run(naive_bayes_model_name): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) clf = getattr(sklearn.naive_bayes, naive_bayes_model_name)() diff --git a/tests/mlmodel_sklearn/test_neural_network_models.py b/tests/mlmodel_sklearn/test_neural_network_models.py index e0c61a3dde..e24cb34aa0 100644 --- a/tests/mlmodel_sklearn/test_neural_network_models.py +++ b/tests/mlmodel_sklearn/test_neural_network_models.py @@ -61,7 +61,7 @@ def _run(neural_network_model_name): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) clf = 
getattr(sklearn.neural_network, neural_network_model_name)() diff --git a/tests/mlmodel_sklearn/test_svm_models.py b/tests/mlmodel_sklearn/test_svm_models.py index 194cda6cba..b4382cb8a4 100644 --- a/tests/mlmodel_sklearn/test_svm_models.py +++ b/tests/mlmodel_sklearn/test_svm_models.py @@ -63,7 +63,7 @@ def _run(svm_model_name): from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) - x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + x_train, x_test, y_train, _y_test = train_test_split(X, y, stratify=y, random_state=0) kwargs = {"random_state": 0} if svm_model_name in ["SVR", "NuSVR", "OneClassSVM"]: diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py index d2c0e49663..00cfd7368d 100644 --- a/tests/testing_support/fixtures.py +++ b/tests/testing_support/fixtures.py @@ -23,8 +23,6 @@ from pathlib import Path from queue import Queue -import pytest - from newrelic.admin.record_deploy import record_deploy from newrelic.api.application import application_instance, application_settings, register_application from newrelic.api.ml_model import set_llm_token_count_callback @@ -47,6 +45,22 @@ _logger = logging.getLogger("newrelic.tests") +try: + import pytest +except ImportError: + # When running benchmarks, we don't use pytest. + # Instead, make these fixtures into functions and generators we can use manually. 
+ pytest = type("pytest", (), {}) + + def fixture(func=None, **kwargs): + # Passthrough to make this transparent for benchmarks + if func: + return func + else: + return fixture + + pytest.fixture = fixture + def _environ_as_bool(name, default=False): flag = os.environ.get(name, default) @@ -188,7 +202,7 @@ def collector_agent_registration_fixture( linked_applications = linked_applications or [] @pytest.fixture(scope=scope) - def _collector_agent_registration_fixture(request): + def _collector_agent_registration_fixture(): if should_initialize_agent: initialize_agent(app_name=app_name, default_settings=default_settings) @@ -332,7 +346,7 @@ def _collector_agent_registration_fixture(request, settings_fixture): @pytest.fixture -def collector_available_fixture(request, collector_agent_registration): +def collector_available_fixture(collector_agent_registration): application = collector_agent_registration settings = global_settings() @@ -342,7 +356,7 @@ def collector_available_fixture(request, collector_agent_registration): time.sleep(0.1) timeout -= 0.1 - assert application.active, "Application failed to activate after 10 seconds." + assert application.active, f"Application failed to activate after {timeout} seconds." def raise_background_exceptions(timeout=5.0): @@ -1259,30 +1273,6 @@ def error_is_saved(error, app_name=None): return error_name in [e.type for e in errors if e.type == error_name] -def function_not_called(module, name): - """Verify that a function is not called. - - Assert False, if it is. 
- - """ - - called = [] - - @transient_function_wrapper(module, name) - def _function_not_called_(wrapped, instance, args, kwargs): - called.append(True) - return wrapped(*args, **kwargs) - - @function_wrapper - def wrapper(wrapped, instance, args, kwargs): - new_wrapper = _function_not_called_(wrapped) - result = new_wrapper(*args, **kwargs) - assert not called - return result - - return wrapper - - def validate_analytics_catmap_data(name, expected_attributes=(), non_expected_attributes=()): samples = [] diff --git a/tests/testing_support/ml_testing_utils.py b/tests/testing_support/ml_testing_utils.py index 0e7307bfb0..4ff70c7ed4 100644 --- a/tests/testing_support/ml_testing_utils.py +++ b/tests/testing_support/ml_testing_utils.py @@ -47,6 +47,15 @@ def events_sans_content(event): return new_event +def tool_events_sans_content(event): + new_event = copy.deepcopy(event) + for _event in new_event: + del _event[1]["input"] + if "output" in _event[1]: + del _event[1]["output"] + return new_event + + def events_sans_llm_metadata(expected_events): events = copy.deepcopy(expected_events) for event in events: diff --git a/tests/testing_support/validators/validate_custom_event_collector_json.py b/tests/testing_support/validators/validate_custom_event_collector_json.py index 0a7afb84db..5efe5cef3c 100644 --- a/tests/testing_support/validators/validate_custom_event_collector_json.py +++ b/tests/testing_support/validators/validate_custom_event_collector_json.py @@ -49,7 +49,7 @@ def _validate_custom_event_collector_json(wrapped, instance, args, kwargs): assert decoded_agent_run_id == agent_run_id assert decoded_sampling_info == sampling_info - max_setting = settings.event_harvest_config.harvest_limits.custom_event_data + max_setting = settings.custom_insights_events.max_samples_stored assert decoded_sampling_info["reservoir_size"] == max_setting assert decoded_sampling_info["events_seen"] == num_events diff --git 
a/tests/testing_support/validators/validate_database_trace_inputs.py b/tests/testing_support/validators/validate_database_trace_inputs.py index bf6c48f3c2..7aa624362f 100644 --- a/tests/testing_support/validators/validate_database_trace_inputs.py +++ b/tests/testing_support/validators/validate_database_trace_inputs.py @@ -35,7 +35,7 @@ def _bind_params( ): return (sql, dbapi2_module, connect_params, cursor_params, sql_parameters, execute_params, source) - (sql, dbapi2_module, connect_params, cursor_params, sql_parameters, execute_params, source) = _bind_params( + (_sql, dbapi2_module, connect_params, cursor_params, sql_parameters, execute_params, source) = _bind_params( *args, **kwargs ) diff --git a/tests/testing_support/validators/validate_datastore_trace_inputs.py b/tests/testing_support/validators/validate_datastore_trace_inputs.py index 0bf7228fad..3cb0bc81c6 100644 --- a/tests/testing_support/validators/validate_datastore_trace_inputs.py +++ b/tests/testing_support/validators/validate_datastore_trace_inputs.py @@ -30,13 +30,13 @@ def _bind_params(product, target, operation, host=None, port_path_or_id=None, da return (product, target, operation, host, port_path_or_id, database_name, kwargs) ( - captured_product, + _captured_product, captured_target, captured_operation, captured_host, captured_port_path_or_id, captured_database_name, - captured_kwargs, + _captured_kwargs, ) = _bind_params(*args, **kwargs) if target is not None: diff --git a/tests/testing_support/validators/validate_error_event_collector_json.py b/tests/testing_support/validators/validate_error_event_collector_json.py index 0fc0443682..9d6f8ef44e 100644 --- a/tests/testing_support/validators/validate_error_event_collector_json.py +++ b/tests/testing_support/validators/validate_error_event_collector_json.py @@ -45,8 +45,7 @@ def _validate_error_event_collector_json(wrapped, instance, args, kwargs): sampling_info = decoded_json[1] - harvest_config = instance.settings.event_harvest_config - 
reservoir_size = harvest_config.harvest_limits.error_event_data + reservoir_size = instance.settings.error_collector.max_event_samples_stored assert sampling_info["reservoir_size"] == reservoir_size assert sampling_info["events_seen"] == num_errors diff --git a/tests/testing_support/validators/validate_function_not_called.py b/tests/testing_support/validators/validate_function_not_called.py new file mode 100644 index 0000000000..5c8f44dc75 --- /dev/null +++ b/tests/testing_support/validators/validate_function_not_called.py @@ -0,0 +1,39 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper + + +def validate_function_not_called(module, name): + """Verify that a function is not called. + + Assert False, if it is. + + """ + + called = [] + + @transient_function_wrapper(module, name) + def _function_not_called_(wrapped, instance, args, kwargs): + called.append(True) + return wrapped(*args, **kwargs) + + @function_wrapper + def wrapper(wrapped, instance, args, kwargs): + new_wrapper = _function_not_called_(wrapped) + result = new_wrapper(*args, **kwargs) + assert not called + return result + + return wrapper diff --git a/tox.ini b/tox.ini index 98feeed514..d6ef7a3fa3 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ ; framework_aiohttp-aiohttp01: aiohttp<2 ; framework_aiohttp-aiohttp0202: aiohttp<2.3 ; 3. Python version required. 
Uses the standard tox definitions. (https://tox.readthedocs.io/en/latest/config.html#tox-environments) -; Examples: py37,py38,py39,pypy310 +; Examples: py38,py39,py310,py311,py312,py313,pypy310 ; 4. Library and version (Optional). Used when testing multiple versions of the library, and may be omitted when only testing a single version. ; Versions should be specified with 2 digits per version number, so <3 becomes 02 and <3.5 becomes 0304. latest and master are also acceptable versions. ; Examples: uvicorn03, CherryPy0302, uvicornlatest @@ -32,33 +32,32 @@ ; Examples: with_extensions, without_extensions ; envlist = ; linux-agent_features-pypy310-without_extensions, -; linux-agent_streaming-py37-{with,without}_extensions, +; linux-agent_streaming-py312-{with,without}_extensions, ; ; Full Format: ; services_required-tests_folder-python_version-library_and_version[optional]-with/without_c_extensions[optional] ; ; Full Examples: -; - memcached-datastore_bmemcached-py37-memcached030 +; - memcached-datastore_bmemcached-py313-memcached030 ; - linux-agent_unittests-py38-with_extensions ; - linux-adapter_gevent-py39 [tox] setupdir = {toxinidir} +uv_python_preference = only-managed +uv_seed = true ; Fail tests when interpreters are missing. 
skip_missing_interpreters = false envlist = # Linux Core Agent Test Suite - linux-agent_features-py37-{with,without}_extensions, {linux,linux_arm64}-agent_features-{py38,py39,py310,py311,py312,py313}-{with,without}_extensions, {linux,linux_arm64}-agent_features-pypy310-without_extensions, - linux-agent_streaming-py37-protobuf06-{with,without}_extensions, {linux,linux_arm64}-agent_streaming-{py38,py39,py310,py311,py312,py313}-protobuf06-{with,without}_extensions, {linux,linux_arm64}-agent_streaming-py39-protobuf{03,0319,04,05}-{with,without}_extensions, - linux-agent_unittests-py37-{with,without}_extensions, {linux,linux_arm64}-agent_unittests-{py38,py39,py310,py311,py312,py313}-{with,without}_extensions, {linux,linux_arm64}-agent_unittests-pypy310-without_extensions, - linux-cross_agent-py37-{with,without}_extensions, {linux,linux_arm64}-cross_agent-{py38,py39,py310,py311,py312,py313}-{with,without}_extensions, + {linux,linux_arm64}-cross_agent-pypy310-without_extensions, # Windows Core Agent Test Suite {windows,windows_arm64}-agent_features-py313-{with,without}_extensions, @@ -69,146 +68,146 @@ envlist = # Integration Tests (only run on Linux) cassandra-datastore_cassandradriver-{py38,py39,py310,py311,py312,pypy310}-cassandralatest, - elasticsearchserver07-datastore_elasticsearch-{py37,py38,py39,py310,py311,py312,py313,pypy310}-elasticsearch07, - elasticsearchserver08-datastore_elasticsearch-{py37,py38,py39,py310,py311,py312,py313,pypy310}-elasticsearch08, - firestore-datastore_firestore-{py37,py38,py39,py310,py311,py312,py313}, + elasticsearchserver07-datastore_elasticsearch-{py38,py39,py310,py311,py312,py313,pypy310}-elasticsearch07, + elasticsearchserver08-datastore_elasticsearch-{py38,py39,py310,py311,py312,py313,pypy310}-elasticsearch08, + firestore-datastore_firestore-{py38,py39,py310,py311,py312,py313}, grpc-framework_grpc-{py39,py310,py311,py312,py313}-grpclatest, kafka-messagebroker_confluentkafka-py39-confluentkafka{0108,0107,0106}, - 
kafka-messagebroker_confluentkafka-{py37,py38,py39,py310,py311,py312,py313}-confluentkafkalatest, - kafka-messagebroker_kafkapython-{py37,py38,py39,py310,py311,py312,py313,pypy310}-kafkapythonlatest, + kafka-messagebroker_confluentkafka-{py38,py39,py310,py311,py312,py313}-confluentkafkalatest, + kafka-messagebroker_kafkapython-{py38,py39,py310,py311,py312,py313,pypy310}-kafkapythonlatest, kafka-messagebroker_kafkapython-{py38,py39,py310,py311,py312,py313,pypy310}-kafkapythonnglatest, memcached-datastore_aiomcache-{py38,py39,py310,py311,py312,py313}, - memcached-datastore_bmemcached-{py37,py38,py39,py310,py311,py312,py313}, - memcached-datastore_memcache-{py37,py38,py39,py310,py311,py312,py313,pypy310}-memcached01, - memcached-datastore_pylibmc-py37, - memcached-datastore_pymemcache-{py37,py38,py39,py310,py311,py312,py313,pypy310}, - mongodb8-datastore_motor-{py37,py38,py39,py310,py311,py312,py313}-motorlatest, - mongodb3-datastore_pymongo-{py37,py38,py39,py310,py311,py312}-pymongo03, - mongodb8-datastore_pymongo-{py37,py38,py39,py310,py311,py312,py313,pypy310}-pymongo04, - mysql-datastore_aiomysql-{py37,py38,py39,py310,py311,py312,py313,pypy310}, + memcached-datastore_bmemcached-{py38,py39,py310,py311,py312,py313}, + memcached-datastore_memcache-{py38,py39,py310,py311,py312,py313,pypy310}-memcached01, + memcached-datastore_pylibmc-{py38,py39,py310,py311}, + memcached-datastore_pymemcache-{py38,py39,py310,py311,py312,py313,pypy310}, + mongodb8-datastore_motor-{py38,py39,py310,py311,py312,py313}-motorlatest, + mongodb3-datastore_pymongo-{py38,py39,py310,py311,py312}-pymongo03, + mongodb8-datastore_pymongo-{py38,py39,py310,py311,py312,py313,pypy310}-pymongo04, + ; aiomysql tests on PyPy disabled for now due to issues building cryptography + mysql-datastore_aiomysql-{py38,py39,py310,py311,py312,py313}, mssql-datastore_pymssql-pymssqllatest-{py39,py310,py311,py312,py313}, - mssql-datastore_pymssql-pymssql020301-{py37,py38}, - 
mysql-datastore_mysql-mysqllatest-{py37,py38,py39,py310,py311,py312,py313}, + mssql-datastore_pymssql-pymssql020301-py38, + mysql-datastore_mysql-mysqllatest-{py38,py39,py310,py311,py312,py313}, mysql-datastore_mysqldb-{py38,py39,py310,py311,py312,py313}, - mysql-datastore_pymysql-{py37,py38,py39,py310,py311,py312,py313,pypy310}, + ; pymysql tests on PyPy disabled for now due to issues building cryptography + mysql-datastore_pymysql-{py38,py39,py310,py311,py312,py313}, oracledb-datastore_oracledb-{py39,py310,py311,py312,py313}-oracledblatest, oracledb-datastore_oracledb-{py39,py313}-oracledb02, oracledb-datastore_oracledb-{py39,py312}-oracledb01, - nginx-external_httpx-{py37,py38,py39,py310,py311,py312,py313}, - postgres16-datastore_asyncpg-{py37,py38,py39,py310,py311,py312,py313}, + nginx-external_httpx-{py38,py39,py310,py311,py312,py313}, + postgres16-datastore_asyncpg-{py38,py39,py310,py311,py312,py313}, postgres16-datastore_psycopg-{py38,py39,py310,py311,py312,py313,pypy310}-psycopglatest, postgres16-datastore_psycopg-py312-psycopg_{purepython,binary,compiled}0301, - postgres16-datastore_psycopg2-{py37,py38,py39,py310,py311,py312}-psycopg2latest, - postgres16-datastore_psycopg2cffi-{py37,py38,py39,py310,py311,py312}-psycopg2cffilatest, - postgres16-datastore_pyodbc-{py37,py38,py39,py310,py311,py312,py313}-pyodbclatest, - postgres9-datastore_postgresql-{py37,py38,py39,py310,py311,py312,py313}, - linux-adapter_asgiref-{py37,py38,py39,py310,py311,py312,py313,pypy310}-asgireflatest, + postgres16-datastore_psycopg2-{py38,py39,py310,py311,py312}-psycopg2latest, + postgres16-datastore_psycopg2cffi-{py38,py39,py310,py311,py312}-psycopg2cffilatest, + postgres16-datastore_pyodbc-{py38,py39,py310,py311,py312,py313}-pyodbclatest, + postgres9-datastore_postgresql-{py38,py39,py310,py311,py312,py313}, + linux-adapter_asgiref-{py38,py39,py310,py311,py312,py313,pypy310}-asgireflatest, linux-adapter_asgiref-py310-asgiref{0303,0304,0305,0306,0307}, - 
linux-adapter_cheroot-{py37,py38,py39,py310,py311,py312,py313}, - linux-adapter_daphne-{py37,py38,py39,py310,py311,py312,py313}-daphnelatest, - linux-adapter_gevent-{py37,py38,py310,py311,py312,py313}, - linux-adapter_gunicorn-{py37,py38,py39,py310,py311,py312,py313}-aiohttp03-gunicornlatest, + linux-adapter_cheroot-{py38,py39,py310,py311,py312,py313}, + linux-adapter_daphne-{py38,py39,py310,py311,py312,py313}-daphnelatest, + linux-adapter_gevent-{py38,py310,py311,py312,py313}, + linux-adapter_gunicorn-{py38,py39,py310,py311,py312,py313}-aiohttp03-gunicornlatest, linux-adapter_hypercorn-{py38,py39,py310,py311,py312,py313}-hypercornlatest, linux-adapter_hypercorn-py38-hypercorn{0010,0011,0012,0013}, ; mcp tests on PyPy disabled for now due to issues building cryptography linux-adapter_mcp-{py310,py311,py312,py313}, - linux-adapter_uvicorn-{py37,py38,py39,py310,py311,py312,py313}-uvicornlatest, + linux-adapter_uvicorn-{py38,py39,py310,py311,py312,py313}-uvicornlatest, linux-adapter_uvicorn-py38-uvicorn014, - linux-adapter_waitress-{py37,py38,py39,py310,py311,py312,py313}-waitresslatest, - linux-application_celery-{py37,py38,py39,py310,py311,py312,py313,pypy310}-celerylatest, + linux-adapter_waitress-{py38,py39,py310,py311,py312,py313}-waitresslatest, + linux-application_celery-{py38,py39,py310,py311,py312,py313,pypy310}-celerylatest, linux-application_celery-py311-celery{0504,0503,0502}, - linux-component_djangorestframework-{py37,py38,py39,py310,py311,py312,py313}-djangorestframeworklatest, + linux-component_djangorestframework-{py38,py39,py310,py311,py312,py313}-djangorestframeworklatest, linux-component_flask_rest-{py38,py39,py310,py311,py312,py313,pypy310}-flaskrestxlatest, - linux-component_flask_rest-py37-flaskrestx110, - linux-component_graphqlserver-{py37,py38,py39,py310,py311,py312}, + linux-component_graphqlserver-{py38,py39,py310,py311,py312}, ;; Tests need to be updated to support newer graphql-server/sanic versions ; linux-component_graphqlserver-py313, 
- linux-component_tastypie-{py37,py38,py39,py310,py311,py312,py313,pypy310}-tastypielatest, - linux-coroutines_asyncio-{py37,py38,py39,py310,py311,py312,py313,pypy310}, - linux-datastore_sqlite-{py37,py38,py39,py310,py311,py312,py313,pypy310}, + linux-component_tastypie-{py38,py39,py310,py311,py312,py313,pypy310}-tastypielatest, + linux-coroutines_asyncio-{py38,py39,py310,py311,py312,py313,pypy310}, + linux-datastore_sqlite-{py38,py39,py310,py311,py312,py313,pypy310}, linux-external_aiobotocore-{py38,py39,py310,py311,py312,py313}-aiobotocorelatest, linux-external_botocore-{py38,py39,py310,py311,py312,py313}-botocorelatest, linux-external_botocore-{py311}-botocorelatest-langchain, linux-external_botocore-py310-botocore0125, linux-external_botocore-py311-botocore0128, - linux-external_feedparser-{py37,py38,py39,py310,py311,py312,py313}-feedparser06, - linux-external_http-{py37,py38,py39,py310,py311,py312,py313}, - linux-external_httplib-{py37,py38,py39,py310,py311,py312,py313,pypy310}, - linux-external_httplib2-{py37,py38,py39,py310,py311,py312,py313,pypy310}, - linux-external_requests-{py37,py38,py39,py310,py311,py312,py313,pypy310}, - linux-external_urllib3-{py37,py38,py39,py310,py311,py312,py313,pypy310}-urllib3latest, - linux-external_urllib3-{py37,py312,py313,pypy310}-urllib30126, - linux-framework_aiohttp-{py37,py38,py39,py310,py311,py312,py313,pypy310}-aiohttp03, - linux-framework_ariadne-{py37,py38,py39,py310,py311,py312,py313}-ariadnelatest, - linux-framework_ariadne-py37-ariadne{0011,0012,0013}, + linux-external_feedparser-{py38,py39,py310,py311,py312,py313}-feedparser06, + linux-external_http-{py38,py39,py310,py311,py312,py313}, + linux-external_httplib-{py38,py39,py310,py311,py312,py313,pypy310}, + linux-external_httplib2-{py38,py39,py310,py311,py312,py313,pypy310}, + # pyzeebe requires grpcio which does not support pypy + linux-external_pyzeebe-{py39,py310,py311,py312}, + linux-external_requests-{py38,py39,py310,py311,py312,py313,pypy310}, + 
linux-external_urllib3-{py38,py39,py310,py311,py312,py313,pypy310}-urllib3latest, + linux-external_urllib3-{py312,py313,pypy310}-urllib30126, + linux-framework_aiohttp-{py38,py39,py310,py311,py312,py313,pypy310}-aiohttp03, + linux-framework_ariadne-{py38,py39,py310,py311,py312,py313}-ariadnelatest, linux-framework_azurefunctions-{py39,py310,py311,py312}, - linux-framework_bottle-{py37,py38,py39,py310,py311,py312,py313,pypy310}-bottle0012, - linux-framework_cherrypy-{py37,py38,py39,py310,py311,py312,py313,pypy310}-CherryPylatest, - linux-framework_django-{py37,py38,py39,py310,py311,py312,py313}-Djangolatest, - linux-framework_django-{py39}-Django{0202,0300,0301,0302,0401}, - linux-framework_falcon-{py37,py38,py39,py310,py311,py312,py313,pypy310}-falconlatest, - linux-framework_falcon-{py38,py39,py310,py311,py312,py313,pypy310}-falconmaster, - linux-framework_fastapi-{py37,py38,py39,py310,py311,py312,py313}, - linux-framework_flask-py37-flask020205, + linux-framework_bottle-{py38,py39,py310,py311,py312,py313,pypy310}-bottle0012, + linux-framework_cherrypy-{py38,py39,py310,py311,py312,py313,pypy310}-CherryPylatest, + linux-framework_django-{py38,py39,py310,py311,py312,py313}-Djangolatest, + linux-framework_django-py39-Django{0202,0300,0301,0302,0401}, + linux-framework_falcon-{py39,py310,py311,py312,py313,pypy310}-falconlatest, + linux-framework_falcon-py38-falcon0410, + linux-framework_falcon-{py39,py310,py311,py312,py313,pypy310}-falconmaster, + linux-framework_fastapi-{py38,py39,py310,py311,py312,py313}, linux-framework_flask-{py38,py39,py310,py311,py312,pypy310}-flask02, - ; linux-framework_flask-py38-flaskmaster fails, even with Flask-Compress<1.16 and coverage==7.61 for py37,py38 + ; linux-framework_flask-py38-flaskmaster fails, even with Flask-Compress<1.16 and coverage==7.61 for py38 linux-framework_flask-py38-flasklatest, ; flaskmaster tests disabled until they can be fixed linux-framework_flask-{py39,py310,py311,py312,py313,pypy310}-flask{latest}, - 
linux-framework_graphene-{py37,py38,py39,py310,py311,py312,py313}-graphenelatest, - linux-component_graphenedjango-{py37,py38,py39,py310,py311,py312,py313}-graphenedjangolatest, - linux-framework_graphql-{py37,py38,py39,py310,py311,py312,py313,pypy310}-graphql03, - linux-framework_graphql-{py37,py38,py39,py310,py311,py312,py313,pypy310}-graphql{latest}, - linux-framework_graphql-py37-graphql{0301,0302}, - linux-framework_pyramid-{py37,py38,py39,py310,py311,py312,py313,pypy310}-Pyramidlatest, - linux-framework_pyramid-{py37,py38,py39,py310,py311,py312,py313,pypy310}-Pyramid0110-cornice, - linux-framework_sanic-{py37,py38}-sanic2406, + linux-framework_graphene-{py38,py39,py310,py311,py312,py313}-graphenelatest, + linux-component_graphenedjango-{py38,py39,py310,py311,py312,py313}-graphenedjangolatest, + linux-framework_graphql-{py38,py39,py310,py311,py312,py313,pypy310}-graphql03, + linux-framework_graphql-{py38,py39,py310,py311,py312,py313,pypy310}-graphqllatest, + linux-framework_pyramid-{py38,py39,py310,py311,py312,py313,pypy310}-Pyramidlatest, + linux-framework_pyramid-{py38,py39,py310,py311,py312,py313,pypy310}-Pyramid0110-cornice, + linux-framework_sanic-{py38}-sanic2406, linux-framework_sanic-{py39,py310,py311,py312,py313,pypy310}-saniclatest, - linux-framework_sanic-{py38,pypy310}-sanic{201207,2112,2290}, + linux-framework_sanic-{py38,pypy310}-sanic2290, linux-framework_starlette-{py310,pypy310}-starlette{0014,0015,0019,0028}, - linux-framework_starlette-{py37,py38,py39,py310,py311,py312,py313,pypy310}-starlettelatest, - linux-framework_starlette-{py37,py38}-starlette002001, + linux-framework_starlette-{py38,py39,py310,py311,py312,py313,pypy310}-starlettelatest, + linux-framework_starlette-{py38}-starlette002001, linux-framework_strawberry-{py38,py39,py310,py311,py312}-strawberry02352, - linux-framework_strawberry-{py37,py38,py39,py310,py311,py312,py313}-strawberrylatest, + linux-framework_strawberry-{py38,py39,py310,py311,py312,py313}-strawberrylatest, 
linux-framework_tornado-{py38,py39,py310,py311,py312,py313}-tornadolatest, linux-framework_tornado-{py310,py311,py312,py313}-tornadomaster, - linux-logger_logging-{py37,py38,py39,py310,py311,py312,py313,pypy310}, - linux-logger_loguru-{py37,py38,py39,py310,py311,py312,py313,pypy310}-logurulatest, - linux-logger_structlog-{py37,py38,py39,py310,py311,py312,py313,pypy310}-structloglatest, + linux-logger_logging-{py38,py39,py310,py311,py312,py313,pypy310}, + linux-logger_loguru-{py38,py39,py310,py311,py312,py313,pypy310}-logurulatest, + linux-logger_structlog-{py38,py39,py310,py311,py312,py313,pypy310}-structloglatest, + linux-mlmodel_autogen-{py310,py311,py312,py313,pypy310}-autogen061, + linux-mlmodel_autogen-{py310,py311,py312,py313,pypy310}-autogenlatest, linux-mlmodel_gemini-{py39,py310,py311,py312,py313}, linux-mlmodel_langchain-{py39,py310,py311,py312}, ;; Package not ready for Python 3.13 (uses an older version of numpy) ; linux-mlmodel_langchain-py313, - linux-mlmodel_openai-openai0-{py37,py38,py39,py310,py311,py312}, + linux-mlmodel_openai-openai0-{py38,py39,py310,py311,py312}, linux-mlmodel_openai-openai107-py312, - linux-mlmodel_openai-openailatest-{py37,py38,py39,py310,py311,py312,py313}, - linux-mlmodel_sklearn-{py37}-scikitlearn0101, + linux-mlmodel_openai-openailatest-{py38,py39,py310,py311,py312,py313}, linux-mlmodel_sklearn-{py38,py39,py310,py311,py312,py313}-scikitlearnlatest, - linux-template_genshi-{py37,py38,py39,py310,py311,py312,py313}-genshilatest, + linux-template_genshi-{py38,py39,py310,py311,py312,py313}-genshilatest, linux-template_jinja2-{py38,py39,py310,py311,py312,py313}-jinja2latest, - linux-template_jinja2-py37-jinja2030103, - linux-template_mako-{py37,py38,py39,py310,py311,py312,py313}, - rabbitmq-messagebroker_pika-{py37,py38,py39,py310,py311,py312,py313,pypy310}-pikalatest, + linux-template_mako-{py38,py39,py310,py311,py312,py313}, + rabbitmq-messagebroker_pika-{py38,py39,py310,py311,py312,py313,pypy310}-pikalatest, 
rabbitmq-messagebroker_kombu-{py38,py39,py310,py311,py312,py313,pypy310}-kombulatest, rabbitmq-messagebroker_kombu-{py38,py39,py310,pypy310}-kombu050204, - redis-datastore_redis-{py37,py38,py39,py310,py311,pypy310}-redis04, + redis-datastore_redis-{py38,py39,py310,py311,pypy310}-redis04, redis-datastore_redis-{py38,py39,py310,py311,py312,pypy310}-redis05, redis-datastore_redis-{py38,py39,py310,py311,py312,py313,pypy310}-redislatest, - rediscluster-datastore_rediscluster-{py37,py312,py313,pypy310}-redislatest, + rediscluster-datastore_rediscluster-{py312,py313,pypy310}-redislatest, valkey-datastore_valkey-{py38,py39,py310,py311,py312,py313,pypy310}-valkeylatest, - solr-datastore_pysolr-{py37,py38,py39,py310,py311,py312,py313,pypy310}, + solr-datastore_pysolr-{py38,py39,py310,py311,py312,py313,pypy310}, [testenv] deps = # Base Dependencies {py39,py310,py311,py312,py313,pypy310}: pytest==8.4.1 py38: pytest==8.3.5 - py37: pytest==7.4.4 iniconfig coverage {py39,py310,py311,py312,py313,pypy310}: WebTest==3.0.6 + py38: WebTest==3.0.1 py313: legacy-cgi==2.6.1 # cgi was removed from the stdlib in 3.13, and is required for WebTest - {py37,py38}: WebTest==3.0.1 # Test Suite Dependencies adapter_asgiref-asgireflatest: asgiref @@ -251,14 +250,12 @@ deps = application_celery-celery0504: celery[pytest]<5.5 application_celery-celery0503: celery[pytest]<5.4 application_celery-celery0502: celery[pytest]<5.3 - application_celery-{py37,pypy310}: importlib-metadata<5.0 + application_celery-pypy310: importlib-metadata<5.0 mlmodel_sklearn: pandas mlmodel_sklearn: protobuf mlmodel_sklearn: numpy mlmodel_sklearn-scikitlearnlatest: scikit-learn mlmodel_sklearn-scikitlearnlatest: scipy - mlmodel_sklearn-scikitlearn0101: scikit-learn<1.1 - mlmodel_sklearn-scikitlearn0101: scipy<1.11.0 component_djangorestframework-djangorestframeworklatest: Django component_djangorestframework-djangorestframeworklatest: djangorestframework component_flask_rest: flask-restful @@ -266,11 +263,6 @@ deps = 
component_flask_rest: itsdangerous component_flask_rest-flaskrestxlatest: flask component_flask_rest-flaskrestxlatest: flask-restx - ; flask-restx only supports Flask v3 after flask-restx v1.3.0 - component_flask_rest-flaskrestx110: Flask<3.0 - component_flask_rest-flaskrestx110: flask-restx<1.2 - component_flask_rest-flaskrestx051: Flask<3.0 - component_flask_rest-flaskrestx051: flask-restx<1.0 component_graphqlserver: graphql-server[sanic,flask]==3.0.0b5 component_graphqlserver: sanic>20 component_graphqlserver: Flask @@ -279,7 +271,7 @@ deps = component_tastypie-tastypielatest: django-tastypie component_tastypie-tastypielatest: django<4.1 component_tastypie-tastypielatest: asgiref<3.7.1 # asgiref==3.7.1 only suppport Python 3.10+ - coroutines_asyncio-{py37,py38,py39,py310,py311,py312,py313}: uvloop + coroutines_asyncio-{py38,py39,py310,py311,py312,py313}: uvloop cross_agent: requests datastore_asyncpg: asyncpg datastore_aiomcache: aiomcache @@ -347,14 +339,12 @@ deps = external_httpx: httpx[http2] external_requests: urllib3 external_requests: requests + external_pyzeebe: pyzeebe external_urllib3-urllib30126: urllib3<1.27 external_urllib3-urllib3latest: urllib3 framework_aiohttp-aiohttp03: aiohttp<4 framework_aiohttp-aiohttp030900rc0: aiohttp==3.9.0rc0 framework_ariadne-ariadnelatest: ariadne - framework_ariadne-ariadne0011: ariadne<0.12 - framework_ariadne-ariadne0012: ariadne<0.13 - framework_ariadne-ariadne0013: ariadne<0.14 framework_azurefunctions: azure-functions framework_azurefunctions: requests framework_bottle-bottle0012: bottle<0.13.0 @@ -369,7 +359,7 @@ deps = framework_django-Django0401: Django<4.2 framework_django-Djangolatest: Django framework_django-Djangomaster: https://github.com/django/django/archive/main.zip - framework_falcon-falcon0300: falcon<3.1 + framework_falcon-falcon0410: falcon<4.2 framework_falcon-falconlatest: falcon framework_falcon-falconmaster: https://github.com/falconry/falcon/archive/master.zip framework_fastapi: fastapi @@ 
-377,8 +367,6 @@ deps = framework_flask: Flask-Compress framework_flask-flask02: flask[async]<3 framework_flask-flask02: jinja2<3.1.3 - framework_flask-flask020205: jinja2<3.1.3 - framework_flask-flask020205: flask[async]<2.3 framework_flask-flasklatest: markupsafe framework_flask-flasklatest: jinja2 framework_flask-flasklatest: flask[async] @@ -387,11 +375,8 @@ deps = framework_flask-flaskmaster: asgiref framework_graphene-graphenelatest: graphene component_graphenedjango-graphenedjangolatest: graphene-django - framework_graphql-graphqllatest: graphql-core framework_graphql-graphql03: graphql-core<4 - framework_graphql-graphql0301: graphql-core<3.2 - framework_graphql-graphql0302: graphql-core<3.3 - framework_graphql-graphqlmaster: https://github.com/graphql-python/graphql-core/archive/main.zip + framework_graphql-graphqllatest: graphql-core framework_grpc-grpclatest: protobuf framework_grpc-grpclatest: grpcio framework_grpc-grpclatest: grpcio-tools @@ -401,14 +386,12 @@ deps = framework_pyramid: routes framework_pyramid-cornice: cornice!=5.0.0 framework_pyramid-Pyramidlatest: Pyramid - framework_sanic-sanic201207: sanic<20.12.8 - framework_sanic-sanic2112: sanic<21.13 framework_sanic-sanic2290: sanic<22.9.1 framework_sanic-sanic2406: sanic<24.07 framework_sanic-saniclatest: sanic - ; This is the last version of tracerite that supports Python 3.7 - framework_sanic-sanic2406: tracerite<1.1.2 - framework_sanic-sanic{201207,2112,2290}: websockets<11 + ; This is the last version of tracerite that supports Python 3.8 + framework_sanic-sanic{2290,2406}: tracerite<1.1.2 + framework_sanic-sanic2290: websockets<11 ; For test_exception_in_middleware test, anyio is used: ; https://github.com/encode/starlette/pull/1157 ; but anyiolatest creates breaking changes to our tests @@ -427,6 +410,13 @@ deps = framework_tornado: pycurl framework_tornado-tornadolatest: tornado framework_tornado-tornadomaster: https://github.com/tornadoweb/tornado/archive/master.zip + 
mlmodel_autogen-autogen061: autogen-agentchat<0.6.2 + mlmodel_autogen-autogen061: autogen-core<0.6.2 + mlmodel_autogen-autogen061: autogen-ext<0.6.2 + mlmodel_autogen-autogenlatest: autogen-core + mlmodel_autogen-autogenlatest: autogen-ext + mlmodel_autogen-autogenlatest: autogen-agentchat + mlmodel_autogen: mcp mlmodel_gemini: google-genai mlmodel_openai-openai0: openai[datalib]<1.0 mlmodel_openai-openai107: openai[datalib]<1.8 @@ -460,7 +450,6 @@ deps = messagebroker_kafkapython-kafkapythonlatest: kafka-python<2.1 template_genshi-genshilatest: genshi template_jinja2-jinja2latest: Jinja2 - template_jinja2-jinja2030103: Jinja2<3.1.4 template_mako: mako setenv = @@ -489,8 +478,6 @@ commands = framework_grpc: --python_out={toxinidir}/tests/framework_grpc/sample_application \ framework_grpc: --grpc_python_out={toxinidir}/tests/framework_grpc/sample_application \ framework_grpc: /{toxinidir}/tests/framework_grpc/sample_application/sample_application.proto - - framework_tornado: pip install --ignore-installed --config-settings="--build-option=--with-openssl" pycurl framework_azurefunctions: {toxinidir}/.github/scripts/install_azure_functions_worker.sh @@ -499,9 +486,6 @@ commands = allowlist_externals = {toxinidir}/.github/scripts/* -install_command= - pip install {opts} {packages} - extras = agent_streaming: infinite-tracing @@ -559,6 +543,7 @@ changedir = external_httplib: tests/external_httplib external_httplib2: tests/external_httplib2 external_httpx: tests/external_httpx + external_pyzeebe: tests/external_pyzeebe external_requests: tests/external_requests external_urllib3: tests/external_urllib3 framework_aiohttp: tests/framework_aiohttp @@ -586,6 +571,7 @@ changedir = messagebroker_kafkapython: tests/messagebroker_kafkapython messagebroker_kombu: tests/messagebroker_kombu messagebroker_pika: tests/messagebroker_pika + mlmodel_autogen: tests/mlmodel_autogen mlmodel_gemini: tests/mlmodel_gemini mlmodel_langchain: tests/mlmodel_langchain mlmodel_openai: 
tests/mlmodel_openai @@ -604,6 +590,8 @@ usefixtures = branch = True disable_warnings = couldnt-parse source = newrelic +omit = + **/_version.py [coverage:paths] source =