Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion examples/model_providers/litellm_auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,20 +2,28 @@

import asyncio

from agents import Agent, Runner, function_tool, set_tracing_disabled
from pydantic import BaseModel

from agents import Agent, ModelSettings, Runner, function_tool, set_tracing_disabled

"""This example uses the built-in support for LiteLLM. To use this, ensure you have the
ANTHROPIC_API_KEY environment variable set.
"""

set_tracing_disabled(disabled=True)

# import logging
# logging.basicConfig(level=logging.DEBUG)

@function_tool
def get_weather(city: str):
    """Demo tool: return a canned (always sunny) weather report for *city*."""
    report = f"The weather in {city} is sunny."
    print(f"[debug] getting weather for {city}")
    return report

class Result(BaseModel):
    """Structured output schema the agent is asked to produce."""
    # The model's final textual answer.
    output_text: str
    # Strings returned by the tool calls made during the run.
    tool_results: list[str]


async def main():
agent = Agent(
Expand All @@ -24,6 +32,8 @@ async def main():
# We prefix with litellm/ to tell the Runner to use the LitellmModel
model="litellm/anthropic/claude-3-5-sonnet-20240620",
tools=[get_weather],
model_settings=ModelSettings(tool_choice="required"),
output_type=Result,
)

result = await Runner.run(agent, "What's the weather in Tokyo?")
Expand Down
46 changes: 40 additions & 6 deletions src/agents/_run_impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -509,13 +509,29 @@ def process_model_response(
# Regular function tool call
else:
if output.name not in function_map:
_error_tracing.attach_error_to_current_span(
SpanError(
message="Tool not found",
data={"tool_name": output.name},
if output_schema is not None and output.name == "json_tool_call":
# LiteLLM could generate non-existent tool calls for structured outputs
items.append(ToolCallItem(raw_item=output, agent=agent))
functions.append(
ToolRunFunction(
tool_call=output,
# this tool does not exist in function_map, so we generate an ad-hoc one,
# which just parses the input if it's a string, and returns the
# value otherwise
function_tool=_build_litellm_json_tool_call(output),
)
Comment on lines 509 to +522

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[P1] json_tool_call result never returned as final output

The new branch allows a LiteLLM json_tool_call to execute by synthesizing a dummy FunctionTool, but nothing marks that tool result as the final agent output. With the default tool_use_behavior="run_llm_again" (see _check_for_final_output_from_tools), the runner will always invoke the model again after the synthetic tool runs. LiteLLM treats json_tool_call itself as the structured response and does not send a follow‑up message, so the agent re-enters the loop until it hits MaxTurnsExceeded and the parsed JSON is never surfaced to the caller. Consider short‑circuiting to a final output when this special tool is encountered (or automatically switching the tool use behavior) so the structured output can be returned instead of triggering another model turn.

Useful? React with 👍 / 👎.

)
)
raise ModelBehaviorError(f"Tool {output.name} not found in agent {agent.name}")
continue
else:
_error_tracing.attach_error_to_current_span(
SpanError(
message="Tool not found",
data={"tool_name": output.name},
)
)
error = f"Tool {output.name} not found in agent {agent.name}"
raise ModelBehaviorError(error)

items.append(ToolCallItem(raw_item=output, agent=agent))
functions.append(
ToolRunFunction(
Expand Down Expand Up @@ -1193,3 +1209,21 @@ async def execute(
# "id": "out" + call.tool_call.id, # TODO remove this, it should be optional
},
)


def _build_litellm_json_tool_call(output: ResponseFunctionToolCall) -> FunctionTool:
    """Build an ad-hoc FunctionTool for LiteLLM's synthetic ``json_tool_call``.

    LiteLLM can emit a tool call named ``json_tool_call`` to carry a structured
    output even though no such tool exists in the agent's function map. This
    helper synthesizes a stand-in tool whose invocation simply decodes the
    call's JSON payload.

    Args:
        output: The model-produced tool call to wrap.

    Returns:
        A FunctionTool that parses a string payload with ``json.loads`` and
        returns any non-string payload unchanged.

    Raises:
        json.JSONDecodeError: Propagated from ``json.loads`` when a string
            payload is not valid JSON.
    """
    # Import once at build time instead of on every tool invocation.
    import json

    async def on_invoke_tool(_ctx: ToolContext[Any], value: Any) -> Any:
        # The structured output arrives as the tool's JSON arguments; decode
        # it when it is a string, otherwise pass it through as-is.
        if isinstance(value, str):
            return json.loads(value)
        return value

    return FunctionTool(
        name=output.name,
        description=output.name,
        # NOTE(review): empty schema with strict_json_schema=True — presumably
        # intentional since the model, not the schema, shapes the payload;
        # confirm FunctionTool accepts an empty strict schema.
        params_json_schema={},
        on_invoke_tool=on_invoke_tool,
        strict_json_schema=True,
        is_enabled=True,
    )