
Commit 02ad97d

Add ingest source to openai events (#961)
* Pin openai below 1.0
* Fixup
* Add ingest_source to events
* Remove duplicate test file
* Handle 0.32.0.post1 version in tests (#963)

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
1 parent e407657 commit 02ad97d

6 files changed: +35 -72 lines changed


newrelic/hooks/mlmodel_openai.py

Lines changed: 5 additions & 0 deletions
@@ -119,6 +119,7 @@ def wrap_embedding_create(wrapped, instance, args, kwargs):
             response_headers, "x-ratelimit-remaining-requests", True
         ),
         "vendor": "openAI",
+        "ingest_source": "Python",
     }

     transaction.record_ml_event("LlmEmbedding", embedding_dict)
@@ -208,6 +209,7 @@ def wrap_chat_completion_create(wrapped, instance, args, kwargs):
             response_headers, "x-ratelimit-remaining-requests", True
         ),
         "vendor": "openAI",
+        "ingest_source": "Python",
         "response.number_of_messages": len(messages) + len(choices),
     }

@@ -283,6 +285,7 @@ def create_chat_completion_message_event(
             "sequence": index,
             "response.model": response_model,
             "vendor": "openAI",
+            "ingest_source": "Python",
         }
         transaction.record_ml_event("LlmChatCompletionMessage", chat_completion_message_dict)
     return (conversation_id, request_id, message_ids)
@@ -362,6 +365,7 @@ async def wrap_embedding_acreate(wrapped, instance, args, kwargs):
             response_headers, "x-ratelimit-remaining-requests", True
         ),
         "vendor": "openAI",
+        "ingest_source": "Python",
     }

     transaction.record_ml_event("LlmEmbedding", embedding_dict)
@@ -458,6 +462,7 @@ async def wrap_chat_completion_acreate(wrapped, instance, args, kwargs):
             response_headers, "x-ratelimit-remaining-requests", True
         ),
         "vendor": "openAI",
+        "ingest_source": "Python",
     }

     transaction.record_ml_event("LlmChatCompletionSummary", chat_completion_summary_dict)

tests/framework_starlette/test_application.py

Lines changed: 18 additions & 12 deletions
@@ -17,13 +17,21 @@
 import pytest
 import starlette
 from testing_support.fixtures import override_ignore_status_codes
+from testing_support.validators.validate_code_level_metrics import (
+    validate_code_level_metrics,
+)
+from testing_support.validators.validate_transaction_errors import (
+    validate_transaction_errors,
+)
+from testing_support.validators.validate_transaction_metrics import (
+    validate_transaction_metrics,
+)

 from newrelic.common.object_names import callable_name
-from testing_support.validators.validate_code_level_metrics import validate_code_level_metrics
-from testing_support.validators.validate_transaction_errors import validate_transaction_errors
-from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics
+from newrelic.common.package_version_utils import get_package_version_tuple
+
+starlette_version = get_package_version_tuple("starlette")[:3]

-starlette_version = tuple(int(x) for x in starlette.__version__.split("."))

 @pytest.fixture(scope="session")
 def target_application():
@@ -78,6 +86,7 @@ def test_application_non_async(target_application, app_name):
     response = app.get("/non_async")
     assert response.status == 200

+
 # Starting in Starlette v0.20.1, the ExceptionMiddleware class
 # has been moved to the starlette.middleware.exceptions from
 # starlette.exceptions
@@ -96,8 +105,10 @@ def test_application_non_async(target_application, app_name):
     ),
 )

+
 @pytest.mark.parametrize(
-    "app_name, transaction_name", middleware_test,
+    "app_name, transaction_name",
+    middleware_test,
 )
 def test_application_nonexistent_route(target_application, app_name, transaction_name):
     @validate_transaction_metrics(
@@ -117,10 +128,6 @@ def _test():
 def test_exception_in_middleware(target_application, app_name):
     app = target_application[app_name]

-    from starlette import __version__ as version
-
-    starlette_version = tuple(int(v) for v in version.split("."))
-
     # Starlette >=0.15 and <0.17 raises an exception group instead of reraising the ValueError
     # This only occurs on Python versions >=3.8
     if sys.version_info[0:2] > (3, 7) and starlette_version >= (0, 15, 0) and starlette_version < (0, 17, 0):
@@ -272,9 +279,8 @@ def _test():
     ),
 )

-@pytest.mark.parametrize(
-    "app_name,scoped_metrics", middleware_test_exception
-)
+
+@pytest.mark.parametrize("app_name,scoped_metrics", middleware_test_exception)
 def test_starlette_http_exception(target_application, app_name, scoped_metrics):
     @validate_transaction_errors(errors=["starlette.exceptions:HTTPException"])
     @validate_transaction_metrics(
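The switch from parsing starlette.__version__ by hand to get_package_version_tuple("starlette")[:3] relates to "Handle 0.32.0.post1 version in tests (#963)" in the commit message: a post-release segment such as "post1" cannot be fed to int(). A rough illustration follows; version_tuple here is a hypothetical stand-in with similar observable behaviour, not the agent's actual get_package_version_tuple implementation.

# The pattern the tests used to rely on fails on post-release versions:
def naive_version_tuple(version):
    return tuple(int(x) for x in version.split("."))

try:
    naive_version_tuple("0.32.0.post1")
except ValueError as exc:
    print(exc)  # invalid literal for int() with base 10: 'post1'

# Hypothetical helper that tolerates non-numeric trailing components
# (the agent's real implementation may differ):
def version_tuple(version):
    parts = []
    for part in version.split("."):
        if not part.isdigit():
            break  # stop at the first non-numeric component, e.g. "post1"
        parts.append(int(part))
    return tuple(parts)

print(version_tuple("0.32.0.post1")[:3])  # -> (0, 32, 0)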

tests/framework_starlette/test_bg_tasks.py

Lines changed: 3 additions & 2 deletions
@@ -15,15 +15,16 @@
 import sys

 import pytest
-from starlette import __version__
 from testing_support.validators.validate_transaction_count import (
     validate_transaction_count,
 )
 from testing_support.validators.validate_transaction_metrics import (
     validate_transaction_metrics,
 )

-starlette_version = tuple(int(x) for x in __version__.split("."))
+from newrelic.common.package_version_utils import get_package_version_tuple
+
+starlette_version = get_package_version_tuple("starlette")[:3]

 try:
     from starlette.middleware import Middleware  # noqa: F401

tests/mlmodel_openai/test_chat_completion.py

Lines changed: 8 additions & 0 deletions
@@ -64,6 +64,7 @@
             "response.headers.ratelimitRemainingTokens": 39940,
             "response.headers.ratelimitRemainingRequests": 199,
             "vendor": "openAI",
+            "ingest_source": "Python",
             "response.number_of_messages": 3,
         },
     ),
@@ -83,6 +84,7 @@
             "sequence": 0,
             "response.model": "gpt-3.5-turbo-0613",
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
     (
@@ -101,6 +103,7 @@
             "sequence": 1,
             "response.model": "gpt-3.5-turbo-0613",
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
     (
@@ -119,6 +122,7 @@
             "sequence": 2,
             "response.model": "gpt-3.5-turbo-0613",
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
 ]
@@ -175,6 +179,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
             "response.headers.ratelimitRemainingTokens": 39940,
             "response.headers.ratelimitRemainingRequests": 199,
             "vendor": "openAI",
+            "ingest_source": "Python",
             "response.number_of_messages": 3,
         },
     ),
@@ -194,6 +199,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
             "sequence": 0,
             "response.model": "gpt-3.5-turbo-0613",
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
     (
@@ -212,6 +218,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
             "sequence": 1,
             "response.model": "gpt-3.5-turbo-0613",
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
     (
@@ -230,6 +237,7 @@ def test_openai_chat_completion_sync_in_txn_with_convo_id(set_trace_info):
             "sequence": 2,
             "response.model": "gpt-3.5-turbo-0613",
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
 ]

tests/mlmodel_openai/test_embeddings.py

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@
             "response.headers.ratelimitRemainingTokens": 149994,
             "response.headers.ratelimitRemainingRequests": 197,
             "vendor": "openAI",
+            "ingest_source": "Python",
         },
     ),
 ]

tests/mlmodel_openai/test_error.py

Lines changed: 0 additions & 58 deletions
This file was deleted.
