Merged
28 commits
aaf4125
Add instrumentation for autogen MCP tool adapter. (#1409)
umaannamalai Jun 23, 2025
6b82b2c
Add autogen agent instrumentation.
umaannamalai Jun 24, 2025
d8cc40e
Revert "Add autogen agent instrumentation."
umaannamalai Jun 24, 2025
c70213f
Add autogen agent instrumentation.
umaannamalai Jun 24, 2025
5aca49f
Revert "Add autogen agent instrumentation."
umaannamalai Jun 24, 2025
5bc8b67
Autogen Agent Instrumentation (#1412)
umaannamalai Jun 27, 2025
02c4f37
Sync autogen feature branch (#1472)
umaannamalai Aug 29, 2025
a15bcfb
MCP Streamable HTTP Spans (DO NOT MERGE YET) (#1462)
umaannamalai Aug 29, 2025
7beb3ab
Update MCP instrumentation to check if AIM is enabled. (#1456)
umaannamalai Aug 29, 2025
0fe6836
Add clause to avoid reporting empty content in openai. (#1517)
umaannamalai Sep 24, 2025
6f3477f
Add instrumentation for autogen MCP tool adapter. (#1409)
umaannamalai Jun 23, 2025
301280f
Add autogen agent instrumentation.
umaannamalai Jun 24, 2025
4a45eb5
Revert "Add autogen agent instrumentation."
umaannamalai Jun 24, 2025
db6fc1b
Add autogen agent instrumentation.
umaannamalai Jun 24, 2025
16e00b6
Revert "Add autogen agent instrumentation."
umaannamalai Jun 24, 2025
1c31fd3
Autogen Agent Instrumentation (#1412)
umaannamalai Jun 27, 2025
9420b06
Sync autogen feature branch (#1472)
umaannamalai Aug 29, 2025
9bddd09
MCP Streamable HTTP Spans (DO NOT MERGE YET) (#1462)
umaannamalai Aug 29, 2025
92d2a74
Update MCP instrumentation to check if AIM is enabled. (#1456)
umaannamalai Aug 29, 2025
6c342fe
Add clause to avoid reporting empty content in openai. (#1517)
umaannamalai Sep 24, 2025
f838e3f
Update validator path.
umaannamalai Sep 24, 2025
6b30c9a
Merge branch 'feature-autogen-instrumentation' of github.com:newrelic…
umaannamalai Sep 24, 2025
1015ab6
Merge branch 'main' into feature-autogen-instrumentation
mergify[bot] Sep 24, 2025
5f72c12
Add MCP adapter test for no transaction.
umaannamalai Sep 24, 2025
a068a35
Merge branch 'feature-autogen-instrumentation' of github.com:newrelic…
umaannamalai Sep 24, 2025
3ea01e7
Linting fixups.
umaannamalai Sep 25, 2025
338696a
Linting
TimPansino Sep 25, 2025
9e4427e
Merge branch 'main' into feature-autogen-instrumentation
mergify[bot] Sep 25, 2025
13 changes: 13 additions & 0 deletions newrelic/config.py
@@ -2842,7 +2842,20 @@ def _process_module_builtin_defaults():
_process_module_definition("loguru", "newrelic.hooks.logger_loguru", "instrument_loguru")
_process_module_definition("loguru._logger", "newrelic.hooks.logger_loguru", "instrument_loguru_logger")

_process_module_definition(
"autogen_ext.tools.mcp._base", "newrelic.hooks.mlmodel_autogen", "instrument_autogen_ext_tools_mcp__base"
)
_process_module_definition(
"autogen_agentchat.agents._assistant_agent",
"newrelic.hooks.mlmodel_autogen",
"instrument_autogen_agentchat_agents__assistant_agent",
)
_process_module_definition("mcp.client.session", "newrelic.hooks.adapter_mcp", "instrument_mcp_client_session")
_process_module_definition(
"mcp.server.fastmcp.tools.tool_manager",
"newrelic.hooks.adapter_mcp",
"instrument_mcp_server_fastmcp_tools_tool_manager",
)

_process_module_definition("structlog._base", "newrelic.hooks.logger_structlog", "instrument_structlog__base")
_process_module_definition("structlog._frames", "newrelic.hooks.logger_structlog", "instrument_structlog__frames")
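For context, each _process_module_definition call registers an import hook: when the named module is first imported, the agent loads the listed hook module and calls the named instrumentation function with the imported module object. A rough sketch of what the first autogen registration above amounts to, written without the agent's import-hook machinery (illustration only, not how the agent wires it at runtime):

import importlib

# Rough equivalent of the registration once autogen_ext.tools.mcp._base loads.
target_module = importlib.import_module("autogen_ext.tools.mcp._base")
hook_module = importlib.import_module("newrelic.hooks.mlmodel_autogen")
hook_module.instrument_autogen_ext_tools_mcp__base(target_module)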
19 changes: 19 additions & 0 deletions newrelic/hooks/adapter_mcp.py
@@ -19,6 +19,7 @@
from newrelic.common.object_names import callable_name
from newrelic.common.object_wrapper import wrap_function_wrapper
from newrelic.common.signature import bind_args
from newrelic.core.config import global_settings

_logger = logging.getLogger(__name__)

@@ -28,6 +29,10 @@ async def wrap_call_tool(wrapped, instance, args, kwargs):
if not transaction:
return await wrapped(*args, **kwargs)

settings = transaction.settings or global_settings()
if not settings.ai_monitoring.enabled:
return await wrapped(*args, **kwargs)

func_name = callable_name(wrapped)
bound_args = bind_args(wrapped, args, kwargs)
tool_name = bound_args.get("name") or "tool"
@@ -42,6 +47,10 @@ async def wrap_read_resource(wrapped, instance, args, kwargs):
if not transaction:
return await wrapped(*args, **kwargs)

settings = transaction.settings or global_settings()
if not settings.ai_monitoring.enabled:
return await wrapped(*args, **kwargs)

func_name = callable_name(wrapped)
bound_args = bind_args(wrapped, args, kwargs)
# Set a default value in case we can't parse out the URI scheme successfully
@@ -64,6 +73,10 @@ async def wrap_get_prompt(wrapped, instance, args, kwargs):
if not transaction:
return await wrapped(*args, **kwargs)

settings = transaction.settings or global_settings()
if not settings.ai_monitoring.enabled:
return await wrapped(*args, **kwargs)

func_name = callable_name(wrapped)
bound_args = bind_args(wrapped, args, kwargs)
prompt_name = bound_args.get("name") or "prompt"
@@ -81,3 +94,9 @@ def instrument_mcp_client_session(module):
wrap_function_wrapper(module, "ClientSession.read_resource", wrap_read_resource)
if hasattr(module.ClientSession, "get_prompt"):
wrap_function_wrapper(module, "ClientSession.get_prompt", wrap_get_prompt)


def instrument_mcp_server_fastmcp_tools_tool_manager(module):
if hasattr(module, "ToolManager"):
if hasattr(module.ToolManager, "call_tool"):
wrap_function_wrapper(module, "ToolManager.call_tool", wrap_call_tool)
224 changes: 224 additions & 0 deletions newrelic/hooks/mlmodel_autogen.py
@@ -0,0 +1,224 @@
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import logging
import sys
import uuid

from newrelic.api.function_trace import FunctionTrace
from newrelic.api.time_trace import get_trace_linking_metadata
from newrelic.api.transaction import current_transaction
from newrelic.common.object_names import callable_name
from newrelic.common.object_wrapper import wrap_function_wrapper
from newrelic.common.package_version_utils import get_package_version
from newrelic.common.signature import bind_args
from newrelic.core.config import global_settings

# Check for the presence of the autogen-core, autogen-agentchat, or autogen-ext package as they should all have the
# same version and one or multiple could be installed
AUTOGEN_VERSION = (
get_package_version("autogen-core")
or get_package_version("autogen-agentchat")
or get_package_version("autogen-ext")
)


RECORD_EVENTS_FAILURE_LOG_MESSAGE = "Exception occurred in Autogen instrumentation: Failed to record LLM events. Please report this issue to New Relic Support.\n%s"


_logger = logging.getLogger(__name__)


async def wrap_from_server_params(wrapped, instance, args, kwargs):
transaction = current_transaction()
if not transaction:
return await wrapped(*args, **kwargs)

func_name = callable_name(wrapped)
bound_args = bind_args(wrapped, args, kwargs)
tool_name = bound_args.get("tool_name") or "tool"
function_trace_name = f"{func_name}/{tool_name}"
with FunctionTrace(name=function_trace_name, group="Llm", source=wrapped):
return await wrapped(*args, **kwargs)


def wrap_on_messages_stream(wrapped, instance, args, kwargs):
transaction = current_transaction()
if not transaction:
return wrapped(*args, **kwargs)

settings = transaction.settings or global_settings()
if not settings.ai_monitoring.enabled:
return wrapped(*args, **kwargs)

# Framework metric also used for entity tagging in the UI
transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION)
transaction._add_agent_attribute("llm", True)

agent_name = getattr(instance, "name", "agent")
agent_id = str(uuid.uuid4())
agent_event_dict = _construct_base_agent_event_dict(agent_name, agent_id, transaction)
func_name = callable_name(wrapped)
function_trace_name = f"{func_name}/{agent_name}"

ft = FunctionTrace(name=function_trace_name, group="Llm/agent/Autogen")
ft.__enter__()

try:
return_val = wrapped(*args, **kwargs)
except Exception:
ft.notice_error(attributes={"agent_id": agent_id})
ft.__exit__(*sys.exc_info())
# If we hit an exception, append the error attribute and duration from the exited function trace
agent_event_dict.update({"duration": ft.duration * 1000, "error": True})
transaction.record_custom_event("LlmAgent", agent_event_dict)
raise

ft.__exit__(None, None, None)
agent_event_dict.update({"duration": ft.duration * 1000})

transaction.record_custom_event("LlmAgent", agent_event_dict)

return return_val


def _get_llm_metadata(transaction):
# Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events
custom_attrs_dict = transaction._custom_params
llm_metadata_dict = {key: value for key, value in custom_attrs_dict.items() if key.startswith("llm.")}
llm_context_attrs = getattr(transaction, "_llm_context_attrs", None)
if llm_context_attrs:
llm_metadata_dict.update(llm_context_attrs)

return llm_metadata_dict


def _extract_tool_output(return_val, tool_name):
try:
output = getattr(return_val[1], "content", None)
return output
except Exception:
_logger.warning("Unable to parse tool output value from %s. Omitting output from LlmTool event.", tool_name)
return None


def _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings):
try:
_input = getattr(tool_call_data, "arguments", None)
tool_input = str(_input) if _input else None
run_id = getattr(tool_call_data, "id", None)
tool_name = getattr(tool_call_data, "name", "tool")
agent_name = bound_args.get("agent_name")
linking_metadata = get_trace_linking_metadata()

tool_event_dict = {
"id": tool_id,
"run_id": run_id,
"name": tool_name,
"span_id": linking_metadata.get("span.id"),
"trace_id": linking_metadata.get("trace.id"),
"agent_name": agent_name,
"vendor": "autogen",
"ingest_source": "Python",
}
if settings.ai_monitoring.record_content.enabled:
tool_event_dict.update({"input": tool_input})
tool_event_dict.update(_get_llm_metadata(transaction))
except Exception:
tool_event_dict = {}
_logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True)

return tool_event_dict


def _construct_base_agent_event_dict(agent_name, agent_id, transaction):
try:
linking_metadata = get_trace_linking_metadata()

agent_event_dict = {
"id": agent_id,
"name": agent_name,
"span_id": linking_metadata.get("span.id"),
"trace_id": linking_metadata.get("trace.id"),
"vendor": "autogen",
"ingest_source": "Python",
}
agent_event_dict.update(_get_llm_metadata(transaction))
except Exception:
agent_event_dict = {}
_logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True)

return agent_event_dict


async def wrap__execute_tool_call(wrapped, instance, args, kwargs):
transaction = current_transaction()
if not transaction:
return await wrapped(*args, **kwargs)

settings = transaction.settings or global_settings()
if not settings.ai_monitoring.enabled:
return await wrapped(*args, **kwargs)

# Framework metric also used for entity tagging in the UI
transaction.add_ml_model_info("Autogen", AUTOGEN_VERSION)
transaction._add_agent_attribute("llm", True)

tool_id = str(uuid.uuid4())
bound_args = bind_args(wrapped, args, kwargs)
tool_call_data = bound_args.get("tool_call")
tool_event_dict = _construct_base_tool_event_dict(bound_args, tool_call_data, tool_id, transaction, settings)

tool_name = getattr(tool_call_data, "name", "tool")

func_name = callable_name(wrapped)
ft = FunctionTrace(name=f"{func_name}/{tool_name}", group="Llm/tool/Autogen")
ft.__enter__()

try:
return_val = await wrapped(*args, **kwargs)
except Exception:
ft.notice_error(attributes={"tool_id": tool_id})
ft.__exit__(*sys.exc_info())
# If we hit an exception, append the error attribute and duration from the exited function trace
tool_event_dict.update({"duration": ft.duration * 1000, "error": True})
transaction.record_custom_event("LlmTool", tool_event_dict)
raise

ft.__exit__(None, None, None)
tool_event_dict.update({"duration": ft.duration * 1000})

# If the tool was executed successfully, we can grab the tool output from the result
tool_output = _extract_tool_output(return_val, tool_name)
if settings.ai_monitoring.record_content.enabled:
tool_event_dict.update({"output": tool_output})

transaction.record_custom_event("LlmTool", tool_event_dict)

return return_val


def instrument_autogen_agentchat_agents__assistant_agent(module):
if hasattr(module, "AssistantAgent"):
if hasattr(module.AssistantAgent, "on_messages_stream"):
wrap_function_wrapper(module, "AssistantAgent.on_messages_stream", wrap_on_messages_stream)
if hasattr(module.AssistantAgent, "_execute_tool_call"):
wrap_function_wrapper(module, "AssistantAgent._execute_tool_call", wrap__execute_tool_call)


def instrument_autogen_ext_tools_mcp__base(module):
if hasattr(module, "McpToolAdapter"):
if hasattr(module.McpToolAdapter, "from_server_params"):
wrap_function_wrapper(module, "McpToolAdapter.from_server_params", wrap_from_server_params)
18 changes: 15 additions & 3 deletions newrelic/hooks/mlmodel_openai.py
@@ -442,6 +442,7 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa
# The function trace will be exited when in the final iteration of the response
# generator.
return_val._nr_ft = ft
return_val._nr_metadata = linking_metadata
return_val._nr_openai_attrs = getattr(return_val, "_nr_openai_attrs", {})
return_val._nr_openai_attrs["messages"] = kwargs.get("messages", [])
return_val._nr_openai_attrs["temperature"] = kwargs.get("temperature")
@@ -488,14 +489,20 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa
choices[0].get("message") or {"content": choices[0].get("text"), "role": "assistant"}
]
finish_reason = choices[0].get("finish_reason")
if "tool_calls" in output_message_list[0] and not output_message_list[0].get("content"):
output_message_list = []
else:
response_model = kwargs.get("response.model")
response_id = kwargs.get("id")
output_message_list = []
finish_reason = None
finish_reason = kwargs.get("finish_reason")
if "content" in kwargs:
output_message_list = [{"content": kwargs.get("content"), "role": kwargs.get("role")}]
finish_reason = kwargs.get("finish_reason")
# When tools are involved, the content key may hold an empty string which we do not want to report
# In this case, the content we are interested in capturing will already be covered in the input_message_list
# We empty out the output_message_list so that we do not report an empty message
if "tool_call" in finish_reason and not kwargs.get("content"):
output_message_list = []
request_model = kwargs.get("model") or kwargs.get("engine")

request_id = response_headers.get("x-request-id")
@@ -765,7 +772,10 @@ def _record_stream_chunk(self, return_val):

def _record_events_on_stop_iteration(self, transaction):
if hasattr(self, "_nr_ft"):
linking_metadata = get_trace_linking_metadata()
# We first check for our saved linking metadata before making a new call to get_trace_linking_metadata
# Directly calling get_trace_linking_metadata() causes the incorrect span ID to be captured and associated with the LLM call
# This leads to incorrect linking of the LLM call in the UI
linking_metadata = self._nr_metadata or get_trace_linking_metadata()
self._nr_ft.__exit__(None, None, None)
try:
openai_attrs = getattr(self, "_nr_openai_attrs", {})
@@ -872,6 +882,8 @@ def set_attrs_on_generator_proxy(proxy, instance):
proxy._nr_response_headers = instance._nr_response_headers
if hasattr(instance, "_nr_openai_attrs"):
proxy._nr_openai_attrs = instance._nr_openai_attrs
if hasattr(instance, "_nr_metadata"):
proxy._nr_metadata = instance._nr_metadata


def wrap_engine_api_resource_create_sync(wrapped, instance, args, kwargs):
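For context, the empty-content guard in _record_completion_success targets completions that only return tool calls; a rough sketch of the response shape involved (field names follow the OpenAI chat completions API, values are illustrative):

# Illustrative choice: the assistant message carries tool calls but an empty
# "content", so the hook now drops the output message rather than recording
# an empty one.
choice = {
    "message": {
        "role": "assistant",
        "content": "",
        "tool_calls": [{"id": "call_abc123", "type": "function"}],
    },
    "finish_reason": "tool_calls",
}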
1 change: 1 addition & 0 deletions tests/adapter_mcp/conftest.py
@@ -22,6 +22,7 @@
"transaction_tracer.stack_trace_threshold": 0.0,
"debug.log_data_collector_payloads": True,
"debug.record_transaction_failure": True,
"ai_monitoring.enabled": True,
}

collector_agent_registration = collector_agent_registration_fixture(