From 96d51754835b515e0a81d8236b589840d08ad684 Mon Sep 17 00:00:00 2001
From: Simon Hellmayr
Date: Tue, 9 Sep 2025 09:20:37 +0200
Subject: [PATCH 1/2] fix(langchain): make new langchain integration work with
 just langchain-core

---
 sentry_sdk/integrations/langchain.py          |  6 +-
 .../integrations/langchain/test_langchain.py  | 88 ++++++++++++++++++-
 2 files changed, 92 insertions(+), 2 deletions(-)

diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py
index a53115a2a9..e14dd619fe 100644
--- a/sentry_sdk/integrations/langchain.py
+++ b/sentry_sdk/integrations/langchain.py
@@ -29,7 +29,6 @@
 
 try:
-    from langchain.agents import AgentExecutor
     from langchain_core.agents import AgentFinish
     from langchain_core.callbacks import (
         BaseCallbackHandler,
@@ -44,6 +43,11 @@
     raise DidNotEnable("langchain not installed")
 
 
+try:
+    from langchain.agents import AgentExecutor
+except ImportError:
+    AgentExecutor = None
+
 DATA_FIELDS = {
     "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
     "function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
index 99dc5f4e37..9d48201ba0 100644
--- a/tests/integrations/langchain/test_langchain.py
+++ b/tests/integrations/langchain/test_langchain.py
@@ -1,6 +1,6 @@
 from typing import List, Optional, Any, Iterator
 from unittest import mock
-from unittest.mock import Mock
+from unittest.mock import Mock, patch
 
 import pytest
 
@@ -662,3 +662,89 @@ def test_tools_integration_in_spans(sentry_init, capture_events):
 
     # Ensure we found at least one span with tools data
     assert tools_found, "No spans found with tools data"
+
+
+def test_langchain_integration_with_langchain_core_only(sentry_init, capture_events):
+    """Test that the langchain integration works when langchain.agents.AgentExecutor
+    is not available or langchain is not installed, but langchain-core is.
+    """
+
+    from langchain_core.outputs import LLMResult, Generation
+
+    # Patch AgentExecutor to be None to simulate it not being available
+    with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
+        from sentry_sdk.integrations.langchain import (
+            LangchainIntegration,
+            SentryLangchainCallback,
+        )
+
+        # Initialize Sentry with the integration - should not fail
+        sentry_init(
+            integrations=[LangchainIntegration(include_prompts=True)],
+            traces_sample_rate=1.0,
+            send_default_pii=True,
+        )
+        events = capture_events()
+
+        # Verify that setup_once doesn't crash when AgentExecutor is None
+        try:
+            LangchainIntegration.setup_once()
+        except Exception as e:
+            pytest.fail(f"setup_once() failed when AgentExecutor is None: {e}")
+
+        # Test that the core functionality still works by directly using the callback
+        callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
+
+        run_id = "12345678-1234-1234-1234-123456789012"
+        serialized = {"_type": "openai-chat", "model_name": "gpt-3.5-turbo"}
+        prompts = ["What is the capital of France?"]
+
+        with start_transaction():
+            # Test on_llm_start
+            callback.on_llm_start(
+                serialized=serialized,
+                prompts=prompts,
+                run_id=run_id,
+                invocation_params={
+                    "temperature": 0.7,
+                    "max_tokens": 100,
+                    "model": "gpt-3.5-turbo",
+                },
+            )
+
+            # Test on_llm_end
+            response = LLMResult(
+                generations=[[Generation(text="The capital of France is Paris.")]],
+                llm_output={
+                    "token_usage": {
+                        "total_tokens": 25,
+                        "prompt_tokens": 10,
+                        "completion_tokens": 15,
+                    }
+                },
+            )
+            callback.on_llm_end(response=response, run_id=run_id)
+
+        # Verify that spans were created
+        assert len(events) > 0
+        tx = events[0]
+        assert tx["type"] == "transaction"
+
+        # Check that LLM spans were created
+        llm_spans = [
+            span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
+        ]
+        assert len(llm_spans) > 0
+
+        llm_span = llm_spans[0]
+        assert llm_span["description"] == "Langchain LLM call"
+        assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
+        assert (
+            llm_span["data"]["gen_ai.response.text"]
+            == "The capital of France is Paris."
+        )
+
+        # Verify token usage was recorded
+        assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25
+        assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10
+        assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15

From 89c1be758d449d60cb9e65218fe2864262fa3f4a Mon Sep 17 00:00:00 2001
From: Simon Hellmayr
Date: Tue, 9 Sep 2025 09:23:16 +0200
Subject: [PATCH 2/2] remove comments

---
 tests/integrations/langchain/test_langchain.py | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
index 9d48201ba0..b6b432c523 100644
--- a/tests/integrations/langchain/test_langchain.py
+++ b/tests/integrations/langchain/test_langchain.py
@@ -671,14 +671,12 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
 
     from langchain_core.outputs import LLMResult, Generation
 
-    # Patch AgentExecutor to be None to simulate it not being available
     with patch("sentry_sdk.integrations.langchain.AgentExecutor", None):
         from sentry_sdk.integrations.langchain import (
             LangchainIntegration,
             SentryLangchainCallback,
         )
 
-        # Initialize Sentry with the integration - should not fail
         sentry_init(
             integrations=[LangchainIntegration(include_prompts=True)],
             traces_sample_rate=1.0,
@@ -686,13 +684,11 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
         )
         events = capture_events()
 
-        # Verify that setup_once doesn't crash when AgentExecutor is None
         try:
             LangchainIntegration.setup_once()
         except Exception as e:
             pytest.fail(f"setup_once() failed when AgentExecutor is None: {e}")
 
-        # Test that the core functionality still works by directly using the callback
         callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True)
 
         run_id = "12345678-1234-1234-1234-123456789012"
@@ -700,7 +696,6 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
         prompts = ["What is the capital of France?"]
 
         with start_transaction():
-            # Test on_llm_start
             callback.on_llm_start(
                 serialized=serialized,
                 prompts=prompts,
@@ -712,7 +707,6 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
             )
 
-            # Test on_llm_end
             response = LLMResult(
                 generations=[[Generation(text="The capital of France is Paris.")]],
                 llm_output={
                     "token_usage": {
@@ -725,12 +719,10 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
             )
             callback.on_llm_end(response=response, run_id=run_id)
 
-        # Verify that spans were created
         assert len(events) > 0
         tx = events[0]
         assert tx["type"] == "transaction"
 
-        # Check that LLM spans were created
         llm_spans = [
             span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
         ]
@@ -743,8 +735,6 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
             llm_span["data"]["gen_ai.response.text"]
             == "The capital of France is Paris."
         )
-
-        # Verify token usage was recorded
         assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25
         assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10
         assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15
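
The core of PATCH 1/2 is the optional-import guard around AgentExecutor. A
minimal, self-contained sketch of the pattern follows; the helper name
instrument_agents is illustrative only, not the SDK's actual code, but the
try/except fallback mirrors the hunk above:

    # Import AgentExecutor only if the full "langchain" package is present;
    # fall back to None so the integration still loads with langchain-core.
    try:
        from langchain.agents import AgentExecutor
    except ImportError:
        AgentExecutor = None  # langchain-core-only install

    def instrument_agents():
        # Hypothetical helper: skip agent-specific patching when
        # AgentExecutor is unavailable; LLM callback spans keep working.
        if AgentExecutor is None:
            return
        # ... wrap AgentExecutor methods here, as the real integration does ...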