diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
new file mode 100644
index 000000000000..35f97fd84093
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
@@ -0,0 +1,16 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+  integrations: [
+    Sentry.openAIIntegration({
+      recordInputs: true,
+      recordOutputs: true,
+    }),
+  ],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
new file mode 100644
index 000000000000..a53a13af7738
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: true,
+  transport: loggingTransport,
+  integrations: [Sentry.openAIIntegration()],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
new file mode 100644
index 000000000000..f3fbac9d1274
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+  integrations: [Sentry.openAIIntegration()],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/scenario.mjs
new file mode 100644
index 000000000000..3958517bea40
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/scenario.mjs
@@ -0,0 +1,108 @@
+import { instrumentOpenAiClient } from '@sentry/core';
+import * as Sentry from '@sentry/node';
+
+class MockOpenAI {
+  constructor(config) {
+    this.apiKey = config.apiKey;
+
+    this.chat = {
+      completions: {
+        create: async params => {
+          // Simulate processing time
+          await new Promise(resolve => setTimeout(resolve, 10));
+
+          if (params.model === 'error-model') {
+            const error = new Error('Model not found');
+            error.status = 404;
+            error.headers = { 'x-request-id': 'mock-request-123' };
+            throw error;
+          }
+
+          return {
+            id: 'chatcmpl-mock123',
+            object: 'chat.completion',
+            created: 1677652288,
+            model: params.model,
+            system_fingerprint: 'fp_44709d6fcb',
+            choices: [
+              {
+                index: 0,
+                message: {
+                  role: 'assistant',
+                  content: 'Hello from OpenAI mock!',
+                },
+                finish_reason: 'stop',
+              },
+            ],
+            usage: {
+              prompt_tokens: 10,
+              completion_tokens: 15,
+              total_tokens: 25,
+            },
+          };
+        },
+      },
+    };
+
+    this.responses = {
+      create: async params => {
+        await new Promise(resolve => setTimeout(resolve, 10));
+
+        return {
+          id: 'resp_mock456',
+          object: 'response',
+          created: 1677652290,
+          model: params.model,
+          input_text: params.input,
+          output_text: `Response to: ${params.input}`,
+          finish_reason: 'stop',
+          usage: {
+            input_tokens: 5,
+            output_tokens: 8,
+            total_tokens: 13,
+          },
+        };
+      },
+    };
+  }
+}
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    const mockClient = new MockOpenAI({
+      apiKey: 'mock-api-key',
+    });
+
+    const client = instrumentOpenAiClient(mockClient);
+
+    // First test: basic chat completion
+    await client.chat.completions.create({
+      model: 'gpt-3.5-turbo',
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        { role: 'user', content: 'What is the capital of France?' },
+      ],
+      temperature: 0.7,
+      max_tokens: 100,
+    });
+
+    // Second test: responses API
+    await client.responses.create({
+      model: 'gpt-3.5-turbo',
+      input: 'Translate this to French: Hello',
+      instructions: 'You are a translator',
+    });
+
+    // Third test: error handling
+    try {
+      await client.chat.completions.create({
+        model: 'error-model',
+        messages: [{ role: 'user', content: 'This will fail' }],
+      });
+    } catch {
+      // Error is expected and handled
+    }
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
new file mode 100644
index 000000000000..ec6f97a6aa00
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
@@ -0,0 +1,183 @@
+import { afterAll, describe, expect } from 'vitest';
+import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
+
+describe('OpenAI integration', () => {
+  afterAll(() => {
+    cleanupChildProcesses();
+  });
+
+  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - basic chat completion without PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'manual',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.request.temperature': 0.7,
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'chatcmpl-mock123',
+          'gen_ai.response.finish_reasons': '["stop"]',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+          'openai.response.id': 'chatcmpl-mock123',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
+          'openai.usage.completion_tokens': 15,
+          'openai.usage.prompt_tokens': 10,
+        },
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
+        origin: 'manual',
+        status: 'ok',
+      }),
+      // Second span - responses API
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'manual',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'resp_mock456',
+          'gen_ai.usage.input_tokens': 5,
+          'gen_ai.usage.output_tokens': 8,
+          'gen_ai.usage.total_tokens': 13,
+          'openai.response.id': 'resp_mock456',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.usage.completion_tokens': 8,
+          'openai.usage.prompt_tokens': 5,
+        },
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
+        origin: 'manual',
+        status: 'ok',
+      }),
+      // Third span - error handling
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'manual',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'error-model',
+        },
+        description: 'chat error-model',
+        op: 'gen_ai.chat',
+        origin: 'manual',
+        status: 'unknown_error',
+      }),
+    ]),
+  };
+
+  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - basic chat completion with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'manual',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.request.temperature': 0.7,
+          'gen_ai.request.messages':
+            '[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is the capital of France?"}]',
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'chatcmpl-mock123',
+          'gen_ai.response.finish_reasons': '["stop"]',
+          'gen_ai.response.text': '["Hello from OpenAI mock!"]',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+          'openai.response.id': 'chatcmpl-mock123',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
+          'openai.usage.completion_tokens': 15,
+          'openai.usage.prompt_tokens': 10,
+        },
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
+        origin: 'manual',
+        status: 'ok',
+      }),
+      // Second span - responses API with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'manual',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.request.messages': '"Translate this to French: Hello"',
+          'gen_ai.response.text': 'Response to: Translate this to French: Hello',
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'resp_mock456',
+          'gen_ai.usage.input_tokens': 5,
+          'gen_ai.usage.output_tokens': 8,
+          'gen_ai.usage.total_tokens': 13,
+          'openai.response.id': 'resp_mock456',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.usage.completion_tokens': 8,
+          'openai.usage.prompt_tokens': 5,
+        },
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
+        origin: 'manual',
+        status: 'ok',
+      }),
+      // Third span - error handling with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'manual',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'error-model',
+          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
+        },
+        description: 'chat error-model',
+        op: 'gen_ai.chat',
+        origin: 'manual',
+        status: 'unknown_error',
+      }),
+    ]),
+  };
+
+  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // Check that custom options are respected
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
+          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+        }),
+      }),
+    ]),
+  };
+
+  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
+    test('creates openai related spans with sendDefaultPii: false', async () => {
+      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
+    test('creates openai related spans with sendDefaultPii: true', async () => {
+      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
+    test('creates openai related spans with custom options', async () => {
+      await createRunner().expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS }).start().completed();
+    });
+  });
+});
diff --git a/dev-packages/rollup-utils/npmHelpers.mjs b/dev-packages/rollup-utils/npmHelpers.mjs
index 83053aaeea98..cff113d622d6 100644
--- a/dev-packages/rollup-utils/npmHelpers.mjs
+++ b/dev-packages/rollup-utils/npmHelpers.mjs
@@ -93,7 +93,7 @@ export function makeBaseNPMConfig(options = {}) {
        }

        return true;
-      }
+      },
    },

    plugins: [nodeResolvePlugin, sucrasePlugin, debugBuildStatementReplacePlugin, rrwebBuildPlugin, cleanupPlugin],
diff --git a/packages/astro/src/index.server.ts b/packages/astro/src/index.server.ts
index 1139f58d092c..0b92c8a4a6f8 100644
--- a/packages/astro/src/index.server.ts
+++ b/packages/astro/src/index.server.ts
@@ -84,6 +84,7 @@ export {
   nodeContextIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   parameterize,
   postgresIntegration,
   postgresJsIntegration,
diff --git a/packages/aws-serverless/src/index.ts b/packages/aws-serverless/src/index.ts
index 7d2abeaf6a12..7cf8e17f0dd7 100644
--- a/packages/aws-serverless/src/index.ts
+++ b/packages/aws-serverless/src/index.ts
@@ -51,6 +51,7 @@ export {
   nativeNodeFetchIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
diff --git a/packages/bun/src/index.ts b/packages/bun/src/index.ts
index d027539931cc..024e3e3af5e8 100644
--- a/packages/bun/src/index.ts
+++ b/packages/bun/src/index.ts
@@ -71,6 +71,7 @@ export {
   nativeNodeFetchIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index 8852e2c9293f..984b8c2e7fd4 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -124,7 +124,9 @@ export type { ReportDialogOptions } from './report-dialog';
 export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/exports';
 export { consoleLoggingIntegration } from './logs/console-integration';
 export { addVercelAiProcessors } from './utils/vercel-ai';
-
+export { instrumentOpenAiClient } from './utils/openai';
+export { OPENAI_INTEGRATION_NAME } from './utils/openai/constants';
+export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './utils/openai/types';
 export type { FeatureFlag } from './utils/featureFlags';
 export {
   _INTERNAL_copyFlagsFromScopeToEvent,
diff --git a/packages/core/src/utils/gen-ai-attributes.ts b/packages/core/src/utils/gen-ai-attributes.ts
new file mode 100644
index 000000000000..cf8a073a4313
--- /dev/null
+++ b/packages/core/src/utils/gen-ai-attributes.ts
@@ -0,0 +1,148 @@
+/**
+ * OpenAI Integration Telemetry Attributes
+ * Based on OpenTelemetry Semantic Conventions for Generative AI
+ * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/
+ */
+
+// =============================================================================
+// OPENTELEMETRY SEMANTIC CONVENTIONS FOR GENAI
+// =============================================================================
+
+/**
+ * The Generative AI system being used
+ * For OpenAI, this should always be "openai"
+ */
+export const GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system';
+
+/**
+ * The name of the model as requested
+ * Examples: "gpt-4", "gpt-3.5-turbo"
+ */
+export const GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model';
+
+/**
+ * Whether streaming was enabled for the request
+ */
+export const GEN_AI_REQUEST_STREAM_ATTRIBUTE = 'gen_ai.request.stream';
+
+/**
+ * The temperature setting for the model request
+ */
+export const GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE = 'gen_ai.request.temperature';
+
+/**
+ * The maximum number of tokens requested
+ */
+export const GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE = 'gen_ai.request.max_tokens';
+
+/**
+ * The frequency penalty setting for the model request
+ */
+export const GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE = 'gen_ai.request.frequency_penalty';
+
+/**
+ * The presence penalty setting for the model request
+ */
+export const GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE = 'gen_ai.request.presence_penalty';
+
+/**
+ * The top_p (nucleus sampling) setting for the model request
+ */
+export const GEN_AI_REQUEST_TOP_P_ATTRIBUTE = 'gen_ai.request.top_p';
+
+/**
+ * The top_k setting for the model request
+ */
+export const GEN_AI_REQUEST_TOP_K_ATTRIBUTE = 'gen_ai.request.top_k';
+
+/**
+ * Stop sequences for the model request
+ */
+export const GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE = 'gen_ai.request.stop_sequences';
+
+/**
+ * Array of reasons why the model stopped generating tokens
+ */
+export const GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE = 'gen_ai.response.finish_reasons';
+
+/**
+ * The name of the model that generated the response
+ */
+export const GEN_AI_RESPONSE_MODEL_ATTRIBUTE = 'gen_ai.response.model';
+
+/**
+ * The unique identifier for the response
+ */
+export const GEN_AI_RESPONSE_ID_ATTRIBUTE = 'gen_ai.response.id';
+
+/**
+ * The number of tokens used in the prompt
+ */
+export const GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.input_tokens';
+
+/**
+ * The number of tokens used in the response
+ */
+export const GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.output_tokens';
+
+/**
+ * The total number of tokens used (input + output)
+ */
+export const GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE = 'gen_ai.usage.total_tokens';
+
+/**
+ * The operation name for OpenAI API calls
+ */
+export const GEN_AI_OPERATION_NAME_ATTRIBUTE = 'gen_ai.operation.name';
+
+/**
+ * The prompt messages sent to OpenAI (stringified JSON)
+ * Only recorded when recordInputs is enabled
+ */
+export const GEN_AI_REQUEST_MESSAGES_ATTRIBUTE = 'gen_ai.request.messages';
+
+/**
+ * The response text from OpenAI (stringified JSON array)
+ * Only recorded when recordOutputs is enabled
+ */
+export const GEN_AI_RESPONSE_TEXT_ATTRIBUTE = 'gen_ai.response.text';
+
+// =============================================================================
+// OPENAI-SPECIFIC ATTRIBUTES
+// =============================================================================
+
+/**
+ * The response ID from OpenAI
+ */
+export const OPENAI_RESPONSE_ID_ATTRIBUTE = 'openai.response.id';
+
+/**
+ * The response model from OpenAI
+ */
+export const OPENAI_RESPONSE_MODEL_ATTRIBUTE = 'openai.response.model';
+
+/**
+ * The response timestamp from OpenAI (ISO string)
+ */
+export const OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE = 'openai.response.timestamp';
+
+/**
+ * The number of completion tokens used (OpenAI specific)
+ */
+export const OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE = 'openai.usage.completion_tokens';
+
+/**
+ * The number of prompt tokens used (OpenAI specific)
+ */
+export const OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'openai.usage.prompt_tokens';
+
+// =============================================================================
+// OPENAI OPERATIONS
+// =============================================================================
+
+/**
+ * OpenAI API operations
+ */
+export const OPENAI_OPERATIONS = {
+  CHAT: 'chat',
+} as const;
diff --git a/packages/core/src/utils/openai/constants.ts b/packages/core/src/utils/openai/constants.ts
new file mode 100644
index 000000000000..e552616cc1db
--- /dev/null
+++ b/packages/core/src/utils/openai/constants.ts
@@ -0,0 +1,5 @@
+export const OPENAI_INTEGRATION_NAME = 'OpenAI';
+
+// https://platform.openai.com/docs/quickstart?api-mode=responses
+// https://platform.openai.com/docs/quickstart?api-mode=chat
+export const INSTRUMENTED_METHODS = ['responses.create', 'chat.completions.create'] as const;
diff --git a/packages/core/src/utils/openai/index.ts b/packages/core/src/utils/openai/index.ts
new file mode 100644
index 000000000000..2b5fdbef9c11
--- /dev/null
+++ b/packages/core/src/utils/openai/index.ts
@@ -0,0 +1,282 @@
+import { getCurrentScope } from '../../currentScopes';
+import { captureException } from '../../exports';
+import { startSpan } from '../../tracing/trace';
+import type { Span, SpanAttributeValue } from '../../types-hoist/span';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+  OPENAI_RESPONSE_ID_ATTRIBUTE,
+  OPENAI_RESPONSE_MODEL_ATTRIBUTE,
+  OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE,
+  OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
+  OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
+} from '../gen-ai-attributes';
+import { OPENAI_INTEGRATION_NAME } from './constants';
+import type {
+  InstrumentedMethod,
+  OpenAiChatCompletionObject,
+  OpenAiClient,
+  OpenAiIntegration,
+  OpenAiOptions,
+  OpenAiResponse,
+  OpenAIResponseObject,
+} from './types';
+import {
+  buildMethodPath,
+  getOperationName,
+  getSpanOperation,
+  isChatCompletionResponse,
+  isResponsesApiResponse,
+  shouldInstrument,
+} from './utils';
+
+/**
+ * Extract request attributes from method arguments
+ */
+function extractRequestAttributes(args: unknown[], methodPath: string): Record<string, unknown> {
+  const attributes: Record<string, unknown> = {
+    [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: getOperationName(methodPath),
+  };
+
+  if (args.length > 0 && typeof args[0] === 'object' && args[0] !== null) {
+    const params = args[0] as Record<string, unknown>;
+
+    attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = params.model ?? 'unknown';
+    if ('temperature' in params) attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = params.temperature;
+    if ('top_p' in params) attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = params.top_p;
+    if ('frequency_penalty' in params)
+      attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = params.frequency_penalty;
+    if ('presence_penalty' in params) attributes[GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE] = params.presence_penalty;
+  } else {
+    attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = 'unknown';
+  }
+
+  return attributes;
+}
+
+/**
+ * Helper function to set token usage attributes
+ */
+function setTokenUsageAttributes(
+  span: Span,
+  promptTokens?: number,
+  completionTokens?: number,
+  totalTokens?: number,
+): void {
+  if (promptTokens !== undefined) {
+    span.setAttributes({
+      [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: promptTokens,
+      [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: promptTokens,
+    });
+  }
+  if (completionTokens !== undefined) {
+    span.setAttributes({
+      [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: completionTokens,
+      [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: completionTokens,
+    });
+  }
+  if (totalTokens !== undefined) {
+    span.setAttributes({
+      [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: totalTokens,
+    });
+  }
+}
+
+/**
+ * Helper function to set common response attributes (ID, model, timestamp)
+ */
+function setCommonResponseAttributes(span: Span, id?: string, model?: string, timestamp?: number): void {
+  if (id) {
+    span.setAttributes({
+      [OPENAI_RESPONSE_ID_ATTRIBUTE]: id,
+      [GEN_AI_RESPONSE_ID_ATTRIBUTE]: id,
+    });
+  }
+  if (model) {
+    span.setAttributes({
+      [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: model,
+      [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: model,
+    });
+  }
+  if (timestamp) {
+    span.setAttributes({
+      [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(timestamp * 1000).toISOString(),
+    });
+  }
+}
+
+/**
+ * Add attributes for Chat Completion responses
+ */
+function addChatCompletionAttributes(span: Span, response: OpenAiChatCompletionObject): void {
+  setCommonResponseAttributes(span, response.id, response.model, response.created);
+  if (response.usage) {
+    setTokenUsageAttributes(
+      span,
+      response.usage.prompt_tokens,
+      response.usage.completion_tokens,
+      response.usage.total_tokens,
+    );
+  }
+  if (Array.isArray(response.choices)) {
+    const finishReasons = response.choices
+      .map(choice => choice.finish_reason)
+      .filter((reason): reason is string => reason !== null);
+    if (finishReasons.length > 0) {
+      span.setAttributes({
+        [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(finishReasons),
+      });
+    }
+  }
+}
+
+/**
+ * Add attributes for Responses API responses
+ */
+function addResponsesApiAttributes(span: Span, response: OpenAIResponseObject): void {
+  setCommonResponseAttributes(span, response.id, response.model, response.created_at);
+  if (response.status) {
+    span.setAttributes({
+      [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify([response.status]),
+    });
+  }
+  if (response.usage) {
+    setTokenUsageAttributes(
+      span,
+      response.usage.input_tokens,
+      response.usage.output_tokens,
+      response.usage.total_tokens,
+    );
+  }
+}
+
+/**
+ * Add response attributes to spans
+ * This currently supports both Chat Completion and Responses API responses
+ */
+function addResponseAttributes(span: Span, result: unknown, recordOutputs?: boolean): void {
+  if (!result || typeof result !== 'object') return;
+
+  const response = result as OpenAiResponse;
+
+  if (isChatCompletionResponse(response)) {
+    addChatCompletionAttributes(span, response);
+    if (recordOutputs && response.choices?.length) {
+      const responseTexts = response.choices.map(choice => choice.message?.content || '');
+      span.setAttributes({ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: JSON.stringify(responseTexts) });
+    }
+  } else if (isResponsesApiResponse(response)) {
+    addResponsesApiAttributes(span, response);
+    if (recordOutputs && response.output_text) {
+      span.setAttributes({ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: response.output_text });
+    }
+  }
+}
+
+// Extract and record AI request inputs, if present. This is intentionally separate from response attributes.
+function addRequestAttributes(span: Span, params: Record<string, unknown>): void {
+  if ('messages' in params) {
+    span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(params.messages) });
+  }
+  if ('input' in params) {
+    span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(params.input) });
+  }
+}
+
+function getOptionsFromIntegration(): OpenAiOptions {
+  const scope = getCurrentScope();
+  const client = scope.getClient();
+  const integration = client?.getIntegrationByName(OPENAI_INTEGRATION_NAME) as OpenAiIntegration | undefined;
+  const shouldRecordInputsAndOutputs = integration ? Boolean(client?.getOptions().sendDefaultPii) : false;
+
+  return {
+    recordInputs: integration?.options?.recordInputs ?? shouldRecordInputsAndOutputs,
+    recordOutputs: integration?.options?.recordOutputs ?? shouldRecordInputsAndOutputs,
+  };
+}
+
+/**
+ * Instrument a method with Sentry spans
+ * Following Sentry AI Agents Manual Instrumentation conventions
+ * @see https://docs.sentry.io/platforms/javascript/guides/node/tracing/instrumentation/ai-agents-module/#manual-instrumentation
+ */
+function instrumentMethod<T extends unknown[], R>(
+  originalMethod: (...args: T) => Promise<R>,
+  methodPath: InstrumentedMethod,
+  context: unknown,
+  options?: OpenAiOptions,
+): (...args: T) => Promise<R> {
+  return async function instrumentedMethod(...args: T): Promise<R> {
+    const finalOptions = options || getOptionsFromIntegration();
+    const requestAttributes = extractRequestAttributes(args, methodPath);
+    const model = (requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] as string) || 'unknown';
+    const operationName = getOperationName(methodPath);
+
+    return startSpan(
+      {
+        name: `${operationName} ${model}`,
+        op: getSpanOperation(methodPath),
+        attributes: requestAttributes as Record<string, SpanAttributeValue>,
+      },
+      async (span: Span) => {
+        try {
+          if (finalOptions.recordInputs && args[0] && typeof args[0] === 'object') {
+            addRequestAttributes(span, args[0] as Record<string, unknown>);
+          }
+
+          const result = await originalMethod.apply(context, args);
+          // TODO: Add streaming support
+          addResponseAttributes(span, result, finalOptions.recordOutputs);
+          return result;
+        } catch (error) {
+          captureException(error);
+          throw error;
+        }
+      },
+    );
+  };
+}
+
+/**
+ * Create a deep proxy for OpenAI client instrumentation
+ */
+function createDeepProxy(target: object, currentPath = '', options?: OpenAiOptions): OpenAiClient {
+  return new Proxy(target, {
+    get(obj: object, prop: string): unknown {
+      const value = (obj as Record<string, unknown>)[prop];
+      const methodPath = buildMethodPath(currentPath, String(prop));
+
+      if (typeof value === 'function' && shouldInstrument(methodPath)) {
+        return instrumentMethod(value as (...args: unknown[]) => Promise<unknown>, methodPath, obj, options);
+      }
+
+      if (value && typeof value === 'object') {
+        return createDeepProxy(value as object, methodPath, options);
+      }
+
+      return value;
+    },
+  });
+}
+
+/**
+ * Instrument an OpenAI client with Sentry tracing
+ * Can be used across Node.js, Cloudflare Workers, and Vercel Edge
+ */
+export function instrumentOpenAiClient(client: OpenAiClient, options?: OpenAiOptions): OpenAiClient {
+  return createDeepProxy(client, '', options);
+}
diff --git a/packages/core/src/utils/openai/types.ts b/packages/core/src/utils/openai/types.ts
new file mode 100644
index 000000000000..c9a3870a959e
--- /dev/null
+++ b/packages/core/src/utils/openai/types.ts
@@ -0,0 +1,143 @@
+import type { INSTRUMENTED_METHODS } from './constants';
+
+/**
+ * Attribute values may be any non-nullish primitive value except an object.
+ *
+ * null or undefined attribute values are invalid and will result in undefined behavior.
+ */
+export type AttributeValue =
+  | string
+  | number
+  | boolean
+  | Array<null | undefined | string>
+  | Array<null | undefined | number>
+  | Array<null | undefined | boolean>;
+
+export interface OpenAiOptions {
+  /**
+   * Enable or disable input recording. Enabled if `sendDefaultPii` is `true`
+   */
+  recordInputs?: boolean;
+  /**
+   * Enable or disable output recording. Enabled if `sendDefaultPii` is `true`
+   */
+  recordOutputs?: boolean;
+}
+
+export interface OpenAiClient {
+  responses?: {
+    create: (...args: unknown[]) => Promise<unknown>;
+  };
+  chat?: {
+    completions?: {
+      create: (...args: unknown[]) => Promise<unknown>;
+    };
+  };
+}
+
+/**
+ * @see https://platform.openai.com/docs/api-reference/chat/object
+ */
+export interface OpenAiChatCompletionObject {
+  id: string;
+  object: 'chat.completion';
+  created: number;
+  model: string;
+  choices: Array<{
+    index: number;
+    message: {
+      role: 'assistant' | 'user' | 'system' | string;
+      content: string | null;
+      refusal?: string | null;
+      annotations?: Array<unknown>; // Depends on whether annotations are enabled
+    };
+    logprobs?: unknown | null;
+    finish_reason: string | null;
+  }>;
+  usage: {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+    prompt_tokens_details?: {
+      cached_tokens?: number;
+      audio_tokens?: number;
+    };
+    completion_tokens_details?: {
+      reasoning_tokens?: number;
+      audio_tokens?: number;
+      accepted_prediction_tokens?: number;
+      rejected_prediction_tokens?: number;
+    };
+  };
+  service_tier?: string;
+  system_fingerprint?: string;
+}
+
+/**
+ * @see https://platform.openai.com/docs/api-reference/responses/object
+ */
+export interface OpenAIResponseObject {
+  id: string;
+  object: 'response';
+  created_at: number;
+  status: 'in_progress' | 'completed' | 'failed' | 'cancelled';
+  error: string | null;
+  incomplete_details: unknown | null;
+  instructions: unknown | null;
+  max_output_tokens: number | null;
+  model: string;
+  output: Array<{
+    type: 'message';
+    id: string;
+    status: 'completed' | string;
+    role: 'assistant' | string;
+    content: Array<{
+      type: 'output_text';
+      text: string;
+      annotations: Array<unknown>;
+    }>;
+  }>;
+  output_text: string; // Direct text output field
+  parallel_tool_calls: boolean;
+  previous_response_id: string | null;
+  reasoning: {
+    effort: string | null;
+    summary: string | null;
+  };
+  store: boolean;
+  temperature: number;
+  text: {
+    format: {
+      type: 'text' | string;
+    };
+  };
+  tool_choice: 'auto' | string;
+  tools: Array<unknown>;
+  top_p: number;
+  truncation: 'disabled' | string;
+  usage: {
+    input_tokens: number;
+    input_tokens_details?: {
+      cached_tokens?: number;
+    };
+    output_tokens: number;
+    output_tokens_details?: {
+      reasoning_tokens?: number;
+    };
+    total_tokens: number;
+  };
+  user: string | null;
+  metadata: Record<string, string>;
+}
+
+export type OpenAiResponse = OpenAiChatCompletionObject | OpenAIResponseObject;
+
+/**
+ * OpenAI Integration interface for type safety
+ */
+export interface OpenAiIntegration {
+  name: string;
+  options: OpenAiOptions;
+}
+
+export type InstrumentedMethod = (typeof INSTRUMENTED_METHODS)[number];
diff --git a/packages/core/src/utils/openai/utils.ts b/packages/core/src/utils/openai/utils.ts
new file mode 100644
index 000000000000..b7d5e12ecf62
--- /dev/null
+++ b/packages/core/src/utils/openai/utils.ts
@@ -0,0 +1,63 @@
+import { OPENAI_OPERATIONS } from '../gen-ai-attributes';
+import { INSTRUMENTED_METHODS } from './constants';
+import type { InstrumentedMethod, OpenAiChatCompletionObject, OpenAIResponseObject } from './types';
+
+/**
+ * Maps OpenAI method paths to Sentry operation names
+ */
+export function getOperationName(methodPath: string): string {
+  if (methodPath.includes('chat.completions')) {
+    return OPENAI_OPERATIONS.CHAT;
+  }
+  if (methodPath.includes('responses')) {
+    // The responses API is also a chat operation
+    return OPENAI_OPERATIONS.CHAT;
+  }
+  return methodPath.split('.').pop() || 'unknown';
+}
+
+/**
+ * Get the span operation for OpenAI methods
+ * Following Sentry's convention: "gen_ai.{operation_name}"
+ */
+export function getSpanOperation(methodPath: string): string {
+  return `gen_ai.${getOperationName(methodPath)}`;
+}
+
+/**
+ * Check if a method path should be instrumented
+ */
+export function shouldInstrument(methodPath: string): methodPath is InstrumentedMethod {
+  return INSTRUMENTED_METHODS.includes(methodPath as InstrumentedMethod);
+}
+
+/**
+ * Build method path from current traversal
+ */
+export function buildMethodPath(currentPath: string, prop: string): string {
+  return currentPath ? `${currentPath}.${prop}` : prop;
+}
+
+/**
+ * Check if response is a Chat Completion object
+ */
+export function isChatCompletionResponse(response: unknown): response is OpenAiChatCompletionObject {
+  return (
+    response !== null &&
+    typeof response === 'object' &&
+    'object' in response &&
+    (response as Record<string, unknown>).object === 'chat.completion'
+  );
+}
+
+/**
+ * Check if response is a Responses API object
+ */
+export function isResponsesApiResponse(response: unknown): response is OpenAIResponseObject {
+  return (
+    response !== null &&
+    typeof response === 'object' &&
+    'object' in response &&
+    (response as Record<string, unknown>).object === 'response'
+  );
+}
diff --git a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts
new file mode 100644
index 000000000000..bcff545627ed
--- /dev/null
+++ b/packages/core/test/lib/utils/openai-utils.test.ts
@@ -0,0 +1,104 @@
+import { describe, expect, it } from 'vitest';
+import {
+  buildMethodPath,
+  getOperationName,
+  getSpanOperation,
+  isChatCompletionResponse,
+  isResponsesApiResponse,
+  shouldInstrument,
+} from '../../../src/utils/openai/utils';
+
+describe('openai-utils', () => {
+  describe('getOperationName', () => {
+    it('should return chat for chat.completions methods', () => {
+      expect(getOperationName('chat.completions.create')).toBe('chat');
+      expect(getOperationName('some.path.chat.completions.method')).toBe('chat');
+    });
+
+    it('should return chat for responses methods', () => {
+      expect(getOperationName('responses.create')).toBe('chat');
+      expect(getOperationName('some.path.responses.method')).toBe('chat');
+    });
+
+    it('should return the last part of path for unknown methods', () => {
+      expect(getOperationName('some.unknown.method')).toBe('method');
+      expect(getOperationName('create')).toBe('create');
+    });
+
+    it('should return unknown for empty path', () => {
+      expect(getOperationName('')).toBe('unknown');
+    });
+  });
+
+  describe('getSpanOperation', () => {
+    it('should prefix operation with gen_ai', () => {
+      expect(getSpanOperation('chat.completions.create')).toBe('gen_ai.chat');
+      expect(getSpanOperation('responses.create')).toBe('gen_ai.chat');
+      expect(getSpanOperation('some.custom.operation')).toBe('gen_ai.operation');
+    });
+  });
+
+  describe('shouldInstrument', () => {
+    it('should return true for instrumented methods', () => {
+      expect(shouldInstrument('responses.create')).toBe(true);
+      expect(shouldInstrument('chat.completions.create')).toBe(true);
+    });
+
+    it('should return false for non-instrumented methods', () => {
+      expect(shouldInstrument('unknown.method')).toBe(false);
+      expect(shouldInstrument('')).toBe(false);
+    });
+  });
+
+  describe('buildMethodPath', () => {
+    it('should build method path correctly', () => {
+      expect(buildMethodPath('', 'chat')).toBe('chat');
+      expect(buildMethodPath('chat', 'completions')).toBe('chat.completions');
+      expect(buildMethodPath('chat.completions', 'create')).toBe('chat.completions.create');
+    });
+  });
+
+  describe('isChatCompletionResponse', () => {
+    it('should return true for valid chat completion responses', () => {
+      const validResponse = {
+        object: 'chat.completion',
+        id: 'chatcmpl-123',
+        model: 'gpt-4',
+        choices: [],
+      };
+      expect(isChatCompletionResponse(validResponse)).toBe(true);
+    });
+
+    it('should return false for invalid responses', () => {
+      expect(isChatCompletionResponse(null)).toBe(false);
+      expect(isChatCompletionResponse(undefined)).toBe(false);
+      expect(isChatCompletionResponse('string')).toBe(false);
+      expect(isChatCompletionResponse(123)).toBe(false);
+      expect(isChatCompletionResponse({})).toBe(false);
+      expect(isChatCompletionResponse({ object: 'different' })).toBe(false);
+      expect(isChatCompletionResponse({ object: null })).toBe(false);
+    });
+  });
+
+  describe('isResponsesApiResponse', () => {
+    it('should return true for valid responses API responses', () => {
+      const validResponse = {
+        object: 'response',
+        id: 'resp_123',
+        model: 'gpt-4',
+        choices: [],
+      };
+      expect(isResponsesApiResponse(validResponse)).toBe(true);
+    });
+
+    it('should return false for invalid responses', () => {
+      expect(isResponsesApiResponse(null)).toBe(false);
+      expect(isResponsesApiResponse(undefined)).toBe(false);
+      expect(isResponsesApiResponse('string')).toBe(false);
+      expect(isResponsesApiResponse(123)).toBe(false);
+      expect(isResponsesApiResponse({})).toBe(false);
+      expect(isResponsesApiResponse({ object: 'different' })).toBe(false);
+      expect(isResponsesApiResponse({ object: null })).toBe(false);
+    });
+  });
+});
diff --git a/packages/google-cloud-serverless/src/index.ts b/packages/google-cloud-serverless/src/index.ts
index 11547ba933a1..ba6d9640a8b5 100644
--- a/packages/google-cloud-serverless/src/index.ts
+++ b/packages/google-cloud-serverless/src/index.ts
@@ -51,6 +51,7 @@ export {
   nativeNodeFetchIntegration,
   onUncaughtExceptionIntegration,
   onUnhandledRejectionIntegration,
+  openAIIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
diff --git a/packages/node/src/index.ts b/packages/node/src/index.ts
index afcc42f16e84..4e7a8482c474 100644
--- a/packages/node/src/index.ts
+++ b/packages/node/src/index.ts
@@ -23,6 +23,7 @@ export { genericPoolIntegration } from './integrations/tracing/genericPool';
 export { dataloaderIntegration } from './integrations/tracing/dataloader';
 export { amqplibIntegration } from './integrations/tracing/amqplib';
 export { vercelAIIntegration } from './integrations/tracing/vercelai';
+export { openAIIntegration } from './integrations/tracing/openai';
 export {
   launchDarklyIntegration,
   buildLaunchDarklyFlagUsedHandler,
diff --git a/packages/node/src/integrations/tracing/index.ts b/packages/node/src/integrations/tracing/index.ts
index e7122562d619..54fb4c72be2d 100644
--- a/packages/node/src/integrations/tracing/index.ts
+++ b/packages/node/src/integrations/tracing/index.ts
@@ -14,6 +14,7 @@ import { instrumentMongo, mongoIntegration } from './mongo';
 import { instrumentMongoose, mongooseIntegration } from './mongoose';
 import { instrumentMysql, mysqlIntegration } from './mysql';
 import { instrumentMysql2, mysql2Integration } from './mysql2';
+import { instrumentOpenAi, openAIIntegration } from './openai';
 import { instrumentPostgres, postgresIntegration } from './postgres';
 import { instrumentPostgresJs, postgresJsIntegration } from './postgresjs';
 import { prismaIntegration } from './prisma';
@@ -45,6 +46,7 @@ export function getAutoPerformanceIntegrations(): Integration[] {
     amqplibIntegration(),
     lruMemoizerIntegration(),
     vercelAIIntegration(),
+    openAIIntegration(),
     postgresJsIntegration(),
   ];
 }
@@ -77,6 +79,7 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) =>
     instrumentGenericPool,
     instrumentAmqplib,
     instrumentVercelAi,
+    instrumentOpenAi,
     instrumentPostgresJs,
   ];
 }
diff --git a/packages/node/src/integrations/tracing/openai/index.ts b/packages/node/src/integrations/tracing/openai/index.ts
new file mode 100644
index 000000000000..0e88d2b315cc
--- /dev/null
+++ b/packages/node/src/integrations/tracing/openai/index.ts
@@ -0,0 +1,74 @@
+import type { IntegrationFn, OpenAiOptions } from '@sentry/core';
+import { defineIntegration, OPENAI_INTEGRATION_NAME } from '@sentry/core';
+import { generateInstrumentOnce } from '@sentry/node-core';
+import { SentryOpenAiInstrumentation } from './instrumentation';
+
+export const instrumentOpenAi = generateInstrumentOnce(
+  OPENAI_INTEGRATION_NAME,
+  () => new SentryOpenAiInstrumentation({}),
+);
+
+const _openAiIntegration = ((options: OpenAiOptions = {}) => {
+  return {
+    name: OPENAI_INTEGRATION_NAME,
+    options,
+    setupOnce() {
+      instrumentOpenAi();
+    },
+  };
+}) satisfies IntegrationFn;
+
+/**
+ * Adds Sentry tracing instrumentation for the OpenAI SDK.
+ *
+ * This integration is enabled by default.
+ *
+ * When configured, this integration automatically instruments OpenAI SDK client instances
+ * to capture telemetry data following OpenTelemetry Semantic Conventions for Generative AI.
+ *
+ * @example
+ * ```javascript
+ * import * as Sentry from '@sentry/node';
+ *
+ * Sentry.init({
+ *   integrations: [Sentry.openAIIntegration()],
+ * });
+ * ```
+ *
+ * ## Options
+ *
+ * - `recordInputs`: Whether to record prompt messages (default: respects `sendDefaultPii` client option)
+ * - `recordOutputs`: Whether to record response text (default: respects `sendDefaultPii` client option)
+ *
+ * ### Default Behavior
+ *
+ * By default, the integration will:
+ * - Record inputs and outputs ONLY if `sendDefaultPii` is set to `true` in your Sentry client options
+ * - Otherwise, inputs and outputs are NOT recorded unless explicitly enabled
+ *
+ * @example
+ * ```javascript
+ * // Record inputs and outputs when sendDefaultPii is false
+ * Sentry.init({
+ *   integrations: [
+ *     Sentry.openAIIntegration({
+ *       recordInputs: true,
+ *       recordOutputs: true
+ *     })
+ *   ],
+ * });
+ *
+ * // Never record inputs/outputs regardless of sendDefaultPii
+ * Sentry.init({
+ *   sendDefaultPii: true,
+ *   integrations: [
+ *     Sentry.openAIIntegration({
+ *       recordInputs: false,
+ *       recordOutputs: false
+ *     })
+ *   ],
+ * });
+ * ```
+ *
+ */
+export const openAIIntegration = defineIntegration(_openAiIntegration);
diff --git a/packages/node/src/integrations/tracing/openai/instrumentation.ts b/packages/node/src/integrations/tracing/openai/instrumentation.ts
new file mode 100644
index 000000000000..2cce987db182
--- /dev/null
+++ b/packages/node/src/integrations/tracing/openai/instrumentation.ts
@@ -0,0 +1,94 @@
+import {
+  type InstrumentationConfig,
+  type InstrumentationModuleDefinition,
+  InstrumentationBase,
+  InstrumentationNodeModuleDefinition,
+} from '@opentelemetry/instrumentation';
+import type { Integration, OpenAiClient, OpenAiOptions } from '@sentry/core';
+import { getCurrentScope, instrumentOpenAiClient, OPENAI_INTEGRATION_NAME, SDK_VERSION } from '@sentry/core';
+
+const supportedVersions = ['>=4.0.0 <6'];
+
+export interface OpenAiIntegration extends Integration {
+  options: OpenAiOptions;
+}
+
+/**
+ * Represents the patched shape of the OpenAI module export.
+ */
+interface PatchedModuleExports {
+  [key: string]: unknown;
+  OpenAI: abstract new (...args: unknown[]) => OpenAiClient;
+}
+
+/**
+ * Determines telemetry recording settings.
+ */
+function determineRecordingSettings(
+  integrationOptions: OpenAiOptions | undefined,
+  defaultEnabled: boolean,
+): { recordInputs: boolean; recordOutputs: boolean } {
+  const recordInputs = integrationOptions?.recordInputs ?? defaultEnabled;
+  const recordOutputs = integrationOptions?.recordOutputs ?? defaultEnabled;
+  return { recordInputs, recordOutputs };
+}
+
+/**
+ * Sentry OpenAI instrumentation using OpenTelemetry.
+ */
+export class SentryOpenAiInstrumentation extends InstrumentationBase {
+  public constructor(config: InstrumentationConfig = {}) {
+    super('@sentry/instrumentation-openai', SDK_VERSION, config);
+  }
+
+  /**
+   * Initializes the instrumentation by defining the modules to be patched.
+   */
+  public init(): InstrumentationModuleDefinition {
+    const module = new InstrumentationNodeModuleDefinition('openai', supportedVersions, this._patch.bind(this));
+    return module;
+  }
+
+  /**
+   * Core patch logic applying instrumentation to the OpenAI client constructor.
+   */
+  private _patch(exports: PatchedModuleExports): PatchedModuleExports | void {
+    const Original = exports.OpenAI;
+
+    const WrappedOpenAI = function (this: unknown, ...args: unknown[]) {
+      const instance = Reflect.construct(Original, args);
+      const scopeClient = getCurrentScope().getClient();
+      const integration = scopeClient?.getIntegrationByName<OpenAiIntegration>(OPENAI_INTEGRATION_NAME);
+      const integrationOpts = integration?.options;
+      const defaultPii = Boolean(scopeClient?.getOptions().sendDefaultPii);
+
+      const { recordInputs, recordOutputs } = determineRecordingSettings(integrationOpts, defaultPii);
+
+      return instrumentOpenAiClient(instance as OpenAiClient, {
+        recordInputs,
+        recordOutputs,
+      });
+    } as unknown as abstract new (...args: unknown[]) => OpenAiClient;
+
+    // Preserve static and prototype chains
+    Object.setPrototypeOf(WrappedOpenAI, Original);
+    Object.setPrototypeOf(WrappedOpenAI.prototype, Original.prototype);
+
+    for (const key of Object.getOwnPropertyNames(Original)) {
+      if (!['length', 'name', 'prototype'].includes(key)) {
+        const descriptor = Object.getOwnPropertyDescriptor(Original, key);
+        if (descriptor) {
+          Object.defineProperty(WrappedOpenAI, key, descriptor);
+        }
+      }
+    }
+
+    const isESM = Object.prototype.toString.call(exports) === '[object Module]';
+    if (isESM) {
+      exports.OpenAI = WrappedOpenAI;
+      return exports;
+    }
+
+    return { ...exports, OpenAI: WrappedOpenAI };
+  }
+}