Skip to content

Add reasoning content - fix on #494 #871

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions examples/reasoning_content/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""
Examples demonstrating how to use models that provide reasoning content.
"""
123 changes: 123 additions & 0 deletions examples/reasoning_content/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
"""
Example demonstrating how to use the reasoning content feature with models that support it.
Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content.
This example shows how to access and use this reasoning content from both streaming and non-streaming responses.
To run this example, you need to:
1. Set your OPENAI_API_KEY environment variable
2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
"""

import asyncio
from typing import Any, cast

from agents import ModelSettings
from agents.models.interface import ModelTracing
from agents.models.openai_provider import OpenAIProvider
from agents.types import ResponseOutputRefusal, ResponseOutputText # type: ignore

# Replace this with a model that supports reasoning content (e.g., deepseek-reasoner)
# For demonstration purposes, we'll use a placeholder model name
MODEL_NAME: str = "deepseek-reasoner"


async def stream_with_reasoning_content():
    """
    Stream a response from a model that emits reasoning content.

    Reasoning deltas and regular output deltas arrive as distinct event
    types; each is echoed in its own color as it arrives and accumulated
    separately, then both accumulated texts are printed at the end.
    """
    model = OpenAIProvider().get_model(MODEL_NAME)

    print("\n=== Streaming Example ===")
    print("Prompt: Write a haiku about recursion in programming")

    # Collect deltas in lists and join once at the end.
    reasoning_parts: list[str] = []
    output_parts: list[str] = []

    async for event in model.stream_response(
        system_instructions="You are a helpful assistant that writes creative content.",
        input="Write a haiku about recursion in programming",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    ):
        if event.type == "response.reasoning_summary_text.delta":
            # Yellow for reasoning content
            print(f"\033[33m{event.delta}\033[0m", end="", flush=True)
            reasoning_parts.append(event.delta)
        elif event.type == "response.output_text.delta":
            # Green for regular content
            print(f"\033[32m{event.delta}\033[0m", end="", flush=True)
            output_parts.append(event.delta)

    print("\n\nReasoning Content:")
    print("".join(reasoning_parts))
    print("\nRegular Content:")
    print("".join(output_parts))
    print("\n")


async def get_response_with_reasoning_content():
    """
    Get a complete (non-streaming) response from a model that provides
    reasoning content.

    The reasoning content arrives as a separate "reasoning" output item,
    while the regular answer arrives as a "message" item. Both are
    extracted and printed, with a fallback message when either is absent.
    """
    provider = OpenAIProvider()
    model = provider.get_model(MODEL_NAME)

    print("\n=== Non-streaming Example ===")
    print("Prompt: Explain the concept of recursion in programming")

    response = await model.get_response(
        system_instructions="You are a helpful assistant that explains technical concepts clearly.",
        input="Explain the concept of recursion in programming",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
    )

    # Extract reasoning content and regular content from the response
    reasoning_content = None
    regular_content = None

    for item in response.output:
        item_type = getattr(item, "type", None)
        if item_type == "reasoning":
            # Guard against an empty summary list to avoid an IndexError.
            if item.summary:
                reasoning_content = item.summary[0].text
        elif item_type == "message":
            # A truthy content list implies at least one element.
            if item.content:
                content_item = item.content[0]
                if isinstance(content_item, ResponseOutputText):
                    regular_content = content_item.text
                elif isinstance(content_item, ResponseOutputRefusal):
                    # A refusal carries its text in `.refusal`, not `.text`.
                    regular_content = cast(Any, content_item).refusal

    print("\nReasoning Content:")
    print(reasoning_content or "No reasoning content provided")

    print("\nRegular Content:")
    print(regular_content or "No regular content provided")

    print("\n")


async def main():
    """Run both reasoning-content examples, reporting any failure gracefully."""
    try:
        await stream_with_reasoning_content()
        await get_response_with_reasoning_content()
    except Exception as exc:
        # Most likely cause: the configured model does not emit reasoning content.
        print(f"Error: {exc}")
        print("\nNote: This example requires a model that supports reasoning content.")
        print("You may need to use a specific model like deepseek-reasoner or similar.")


if __name__ == "__main__":
    asyncio.run(main())
89 changes: 89 additions & 0 deletions examples/reasoning_content/runner_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
"""
Example demonstrating how to use the reasoning content feature with the Runner API.
This example shows how to extract and use reasoning content from responses when using
the Runner API, which is the most common way users interact with the Agents library.
To run this example, you need to:
1. Set your OPENAI_API_KEY environment variable
2. Use a model that supports reasoning content (e.g., deepseek-reasoner)
"""

import asyncio
from typing import Any

from agents import Agent, Runner, trace
from agents.items import ReasoningItem

# Replace this with a model that supports reasoning content (e.g., deepseek-reasoner)
# For demonstration purposes, we'll use a placeholder model name
MODEL_NAME = "deepseek-reasoner"


async def main():
    """
    Demonstrate extracting reasoning content via the Runner API.

    Example 1 pulls a reasoning item out of a completed run's items;
    Example 2 collects reasoning and answer deltas from a streamed run.
    """
    print(f"Using model: {MODEL_NAME}")

    # Create an agent with a model that supports reasoning content
    agent = Agent(
        name="Reasoning Agent",
        instructions="You are a helpful assistant that explains your reasoning step by step.",
        model=MODEL_NAME,
    )

    # Example 1: Non-streaming response
    with trace("Reasoning Content - Non-streaming"):
        print("\n=== Example 1: Non-streaming response ===")
        result = await Runner.run(
            agent, "What is the square root of 841? Please explain your reasoning."
        )

        # RunResult exposes the items generated during the run as `new_items`
        # (it has no `response` attribute). Reasoning output is wrapped in a
        # ReasoningItem whose raw_item carries the summary text.
        reasoning_content = None
        for item in result.new_items:
            if isinstance(item, ReasoningItem) and item.raw_item.summary:
                reasoning_content = item.raw_item.summary[0].text
                break

        print("\nReasoning Content:")
        print(reasoning_content or "No reasoning content provided")

        print("\nFinal Output:")
        print(result.final_output)

    # Example 2: Streaming response
    with trace("Reasoning Content - Streaming"):
        print("\n=== Example 2: Streaming response ===")
        print("\nStreaming response:")

        # Buffers to collect reasoning and regular content
        reasoning_buffer = ""
        content_buffer = ""

        # RunResultStreaming is consumed via stream_events(); raw response
        # events wrap the model-level deltas, distinguished by `type`.
        stream = Runner.run_streamed(agent, "What is 15 × 27? Please explain your reasoning.")

        async for event in stream.stream_events():
            if event.type != "raw_response_event":
                continue
            data: Any = event.data
            if data.type == "response.reasoning_summary_text.delta":
                reasoning_buffer += data.delta
                # Yellow for reasoning
                print(f"\033[33m{data.delta}\033[0m", end="", flush=True)
            elif data.type == "response.output_text.delta":
                content_buffer += data.delta
                # Green for regular content
                print(f"\033[32m{data.delta}\033[0m", end="", flush=True)

        print("\n\nCollected Reasoning Content:")
        print(reasoning_buffer)

        print("\nCollected Final Answer:")
        print(content_buffer)


if __name__ == "__main__":
    asyncio.run(main())
12 changes: 12 additions & 0 deletions src/agents/models/chatcmpl_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,10 @@
ResponseOutputMessageParam,
ResponseOutputRefusal,
ResponseOutputText,
ResponseReasoningItem,
)
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
from openai.types.responses.response_reasoning_item import Summary

from ..agent_output import AgentOutputSchemaBase
from ..exceptions import AgentsException, UserError
Expand Down Expand Up @@ -85,6 +87,16 @@ def convert_response_format(
def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
items: list[TResponseOutputItem] = []

# Handle reasoning content if available
if hasattr(message, "reasoning_content") and message.reasoning_content:
items.append(
ResponseReasoningItem(
id=FAKE_RESPONSES_ID,
summary=[Summary(text=message.reasoning_content, type="summary_text")],
type="reasoning",
)
)

message_item = ResponseOutputMessage(
id=FAKE_RESPONSES_ID,
content=[],
Expand Down
Loading