5 changes: 5 additions & 0 deletions newrelic/hooks/mlmodel_openai.py
@@ -119,6 +119,7 @@ def wrap_embedding_create(wrapped, instance, args, kwargs):
response_headers, "x-ratelimit-remaining-requests", True
),
"vendor": "openAI",
"ingest_source": "Python",
}

transaction.record_ml_event("LlmEmbedding", embedding_dict)
@@ -208,6 +209,7 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs):
response_headers, "x-ratelimit-remaining-requests", True
),
"vendor": "openAI",
"ingest_source": "Python",
"response.number_of_messages": len(messages) + len(choices),
}

@@ -283,6 +285,7 @@ def create_chat_completion_message_event(
"sequence": index,
"response.model": response_model,
"vendor": "openAI",
"ingest_source": "Python",
}
transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict)
return (conversation_id, request_id, message_ids)
@@ -362,6 +365,7 @@ async def wrap_embedding_acreate(wrapped, instance, args, kwargs):
response_headers, "x-ratelimit-remaining-requests", True
),
"vendor": "openAI",
"ingest_source": "Python",
}

transaction.record_ml_event("LlmEmbedding", embedding_dict)
@@ -458,6 +462,7 @@ async def wrap_chat_completion_acreate(wrapped, instance, args, kwargs):
response_headers, "x-ratelimit-remaining-requests", True
),
"vendor": "openAI",
"ingest_source": "Python",
}

transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict)
30 changes: 18 additions & 12 deletions tests/framework_starlette/test_application.py
@@ -17,13 +17,21 @@
import pytest
import starlette
from testing_support.fixtures import override_ignore_status_codes
from testing_support.validators.validate_code_level_metrics import (
validate_code_level_metrics,
)
from testing_support.validators.validate_transaction_errors import (
validate_transaction_errors,
)
from testing_support.validators.validate_transaction_metrics import (
validate_transaction_metrics,
)

from newrelic.common.object_names import callable_name
from testing_support.validators.validate_code_level_metrics import validate_code_level_metrics
from testing_support.validators.validate_transaction_errors import validate_transaction_errors
from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics
from newrelic.common.package_version_utils import get_package_version_tuple

starlette_version = get_package_version_tuple("starlette")[:3]

starlette_version = tuple(int(x) for x in starlette.__version__.split("."))
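
The hunk above swaps manual parsing of `starlette.__version__` for the agent's `get_package_version_tuple` helper. One plausible motivation (my reading; the diff itself doesn't state it): `int()` fails on pre-release version segments, and attribute-based lookup breaks entirely if a package stops shipping `__version__`.

```python
# Why the removed parse is fragile: pre-release suffixes are not integers.
version = "0.20.0rc1"  # hypothetical pre-release version string
try:
    tuple(int(x) for x in version.split("."))
except ValueError as exc:
    print(f"naive parse failed: {exc}")  # int("0rc1") raises ValueError

# The replacement reads installed-package metadata instead; slicing to [:3]
# keeps only the leading (major, minor, micro) components.
from newrelic.common.package_version_utils import get_package_version_tuple

starlette_version = get_package_version_tuple("starlette")[:3]
```
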

@pytest.fixture(scope="session")
def target_application():
@@ -78,6 +86,7 @@ def test_application_non_async(target_application, app_name):
response = app.get("/non_async")
assert response.status == 200


# Starting in Starlette v0.20.1, the ExceptionMiddleware class
# has been moved from starlette.exceptions to
# starlette.middleware.exceptions
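
A hypothetical sketch (not part of this diff) of how the import move described in the comment above can be handled across versions:

```python
# Hypothetical version-conditional import, keyed on the starlette_version
# tuple computed earlier in this module; not code from the actual diff.
if starlette_version >= (0, 20, 1):
    from starlette.middleware.exceptions import ExceptionMiddleware  # noqa: F401
else:
    from starlette.exceptions import ExceptionMiddleware  # noqa: F401
```
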
@@ -96,8 +105,10 @@ def test_application_non_async(target_application, app_name):
),
)


@pytest.mark.parametrize(
"app_name, transaction_name", middleware_test,
"app_name, transaction_name",
middleware_test,
)
def test_application_nonexistent_route(target_application, app_name, transaction_name):
@validate_transaction_metrics(
@@ -117,10 +128,6 @@ def _test():
def test_exception_in_middleware(target_application, app_name):
app = target_application[app_name]

from starlette import __version__ as version

starlette_version = tuple(int(v) for v in version.split("."))

# Starlette >=0.15 and <0.17 raises an exception group instead of reraising the ValueError
# This only occurs on Python versions >=3.8
if sys.version_info[0:2] > (3, 7) and starlette_version >= (0, 15, 0) and starlette_version < (0, 17, 0):
@@ -272,9 +279,8 @@ def _test():
),
)
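
Tying back to the exception-group comment in the earlier hunk: a hypothetical sketch of gating the expected error name on Python and Starlette versions. The ExceptionGroup module path here is an assumption, not something taken from this diff.

```python
# Hypothetical gate; assumes sys and starlette_version are available as in
# the module above, and the ExceptionGroup path is only a guess.
if sys.version_info[0:2] > (3, 7) and (0, 15, 0) <= starlette_version < (0, 17, 0):
    expected_error = "anyio._backends._asyncio:ExceptionGroup"
else:
    expected_error = "builtins:ValueError"
```
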

@pytest.mark.parametrize(
"app_name,scoped_metrics", middleware_test_exception
)

@pytest.mark.parametrize("app_name,scoped_metrics", middleware_test_exception)
def test_starlette_http_exception(target_application, app_name, scoped_metrics):
@validate_transaction_errors(errors=["starlette.exceptions:HTTPException"])
@validate_transaction_metrics(
5 changes: 3 additions & 2 deletions tests/framework_starlette/test_bg_tasks.py
@@ -15,15 +15,16 @@
import sys

import pytest
from starlette import __version__
from testing_support.validators.validate_transaction_count import (
validate_transaction_count,
)
from testing_support.validators.validate_transaction_metrics import (
validate_transaction_metrics,
)

starlette_version = tuple(int(x) for x in __version__.split("."))
from newrelic.common.package_version_utils import get_package_version_tuple

starlette_version = get_package_version_tuple("starlette")[:3]

try:
from starlette.middleware import Middleware # noqa: F401
8 changes: 8 additions & 0 deletions tests/mlmodel_openai/test_chat_completion.py
@@ -64,6 +64,7 @@
"response.headers.ratelimitRemainingTokens": 39940,
"response.headers.ratelimitRemainingRequests": 199,
"vendor": "openAI",
"ingest_source": "Python",
"response.number_of_messages": 3,
},
),
@@ -83,6 +84,7 @@
"sequence": 0,
"response.model": "gpt-3.5-turbo-0613",
"vendor": "openAI",
"ingest_source": "Python",
},
),
(
@@ -101,6 +103,7 @@
"sequence": 1,
"response.model": "gpt-3.5-turbo-0613",
"vendor": "openAI",
"ingest_source": "Python",
},
),
(
@@ -119,6 +122,7 @@
"sequence": 2,
"response.model": "gpt-3.5-turbo-0613",
"vendor": "openAI",
"ingest_source": "Python",
},
),
]
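
These fixture lists spell out every attribute each recorded event must carry, `ingest_source` now included. A sketch of how such a list is typically consumed follows; the validator import path reflects this repo's testing conventions, and `chat_completion_recorded_events` is my illustrative name for the list above (its actual name falls outside the shown hunks).

```python
# Assumed usage; the validator, the decorator stack, and the openai call
# shape are drawn from the repo's conventions, not from the hunks shown here.
from testing_support.validators.validate_ml_events import validate_ml_events

from newrelic.api.background_task import background_task


@validate_ml_events(chat_completion_recorded_events)  # the list defined above
@background_task()
def test_chat_completion_records_events(set_trace_info):
    set_trace_info()
    openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=({"role": "user", "content": "Hello"},),  # illustrative input
    )
```
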
@@ -175,6 +179,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
"response.headers.ratelimitRemainingTokens": 39940,
"response.headers.ratelimitRemainingRequests": 199,
"vendor": "openAI",
"ingest_source": "Python",
"response.number_of_messages": 3,
},
),
@@ -194,6 +199,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
"sequence": 0,
"response.model": "gpt-3.5-turbo-0613",
"vendor": "openAI",
"ingest_source": "Python",
},
),
(
@@ -212,6 +218,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
"sequence": 1,
"response.model": "gpt-3.5-turbo-0613",
"vendor": "openAI",
"ingest_source": "Python",
},
),
(
@@ -230,6 +237,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
"sequence": 2,
"response.model": "gpt-3.5-turbo-0613",
"vendor": "openAI",
"ingest_source": "Python",
},
),
]
1 change: 1 addition & 0 deletions tests/mlmodel_openai/test_embeddings.py
@@ -55,6 +55,7 @@
"response.headers.ratelimitRemainingTokens": 149994,
"response.headers.ratelimitRemainingRequests": 197,
"vendor": "openAI",
"ingest_source": "Python",
},
),
]
58 changes: 0 additions & 58 deletions tests/mlmodel_openai/test_error.py

This file was deleted.