62 changes: 49 additions & 13 deletions src/praisonai-agents/praisonaiagents/llm/llm.py
@@ -722,6 +722,33 @@ def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_
         if schema_model and hasattr(schema_model, 'model_json_schema'):
             system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(schema_model.model_json_schema())}"

+        # For XML format models, add tool descriptions to system prompt
+        if tools and self._supports_xml_tool_format():
+            system_prompt += "\n\nYou have access to the following tools:"
+            for tool in tools:
+                if isinstance(tool, dict) and 'function' in tool:
+                    func = tool['function']
+                    name = func.get('name', 'unknown')
+                    description = func.get('description', 'No description available')
+
+                    # Add parameter information if available
+                    params = func.get('parameters', {}).get('properties', {})
+                    required = func.get('parameters', {}).get('required', [])
+
+                    param_info = ""
+                    if params:
+                        param_list = []
+                        for param_name, param_details in params.items():
+                            param_type = param_details.get('type', 'any')
+                            is_required = param_name in required
+                            param_list.append(f"{param_name} ({param_type}){'*' if is_required else ''}")
+                        param_info = f" - Parameters: {', '.join(param_list)}"
+
+                    system_prompt += f"\n- {name}: {description}{param_info}"
+
+            system_prompt += "\n\nWhen you need to use a tool, wrap your tool call in XML tags like this:"
+            system_prompt += "\n<tool_call>\n{\"name\": \"tool_name\", \"arguments\": {\"param\": \"value\"}}\n</tool_call>"
+
         # Skip system messages for legacy o1 models as they don't support them
         if not self._needs_system_message_skip():
             messages.append({"role": "system", "content": system_prompt})
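For illustration, with a single `get_weather` tool whose only parameter `city` (string) is required, the added block should render a system-prompt section along these lines (hand-traced from the code above, not captured output; `*` marks required parameters):

You have access to the following tools:
- get_weather: Get weather information for a city - Parameters: city (string)*

When you need to use a tool, wrap your tool call in XML tags like this:
<tool_call>
{"name": "tool_name", "arguments": {"param": "value"}}
</tool_call>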
@@ -3155,19 +3182,28 @@ def _build_completion_params(self, **override_params) -> Dict[str, Any]:

logging.debug(f"Using Gemini native structured output with schema: {json.dumps(schema, indent=2)}")

# Add tool_choice="auto" when tools are provided (unless already specified)
if 'tools' in params and params['tools'] and 'tool_choice' not in params:
# For Gemini models, use tool_choice to encourage tool usage
if self._is_gemini_model():
try:
import litellm
# Check if model supports function calling before setting tool_choice
if litellm.supports_function_calling(model=self.model):
params['tool_choice'] = 'auto'
except Exception as e:
# If check fails, still set tool_choice for known Gemini models
logging.debug(f"Could not verify function calling support: {e}. Setting tool_choice anyway.")
params['tool_choice'] = 'auto'
# Handle XML format models (like Qwen) differently for tool calls
if 'tools' in params and params['tools']:
if self._supports_xml_tool_format():
# For XML format models, remove tools parameter to avoid OpenRouter routing issues
# Tools will be described in the system prompt instead
logging.debug("Removing tools parameter for XML format model to avoid provider routing issues")
params.pop('tools', None)
params.pop('tool_choice', None)
else:
# Add tool_choice="auto" when tools are provided (unless already specified)
if 'tool_choice' not in params:
# For Gemini models, use tool_choice to encourage tool usage
if self._is_gemini_model():
try:
import litellm
# Check if model supports function calling before setting tool_choice
if litellm.supports_function_calling(model=self.model):
params['tool_choice'] = 'auto'
except Exception as e:
# If check fails, still set tool_choice for known Gemini models
logging.debug(f"Could not verify function calling support: {e}. Setting tool_choice anyway.")
params['tool_choice'] = 'auto'

return params
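Since the `tools` parameter is dropped for XML format models, any tool calls come back as `<tool_call>` text inside the completion itself and must be extracted from the reply. A minimal sketch of such a parser, assuming the JSON-in-XML payload format instructed above (hypothetical helper for illustration only; this diff does not show the parsing side in `llm.py`):

import json
import re

def parse_xml_tool_calls(text: str) -> list:
    """Extract JSON payloads from <tool_call>...</tool_call> blocks in a model reply."""
    calls = []
    for block in re.findall(r"<tool_call>\s*(.*?)\s*</tool_call>", text, re.DOTALL):
        try:
            calls.append(json.loads(block))
        except json.JSONDecodeError:
            continue  # skip malformed payloads rather than failing the whole reply
    return calls

# Example reply from a Qwen-style model:
reply = 'Checking.\n<tool_call>\n{"name": "get_weather", "arguments": {"city": "Tokyo"}}\n</tool_call>'
print(parse_xml_tool_calls(reply))  # [{'name': 'get_weather', 'arguments': {'city': 'Tokyo'}}]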

87 changes: 87 additions & 0 deletions test_openrouter_xml_fix.py
@@ -0,0 +1,87 @@
#!/usr/bin/env python3
"""Test script for the OpenRouter XML tool call fix"""

import os
import sys

# Add the source directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src', 'praisonai-agents'))

from praisonaiagents import Agent

def get_weather(city: str) -> str:
    """Get weather information for a city"""
    return f"The weather in {city} is sunny with 22°C"

def main():
    print("Testing OpenRouter XML tool call fix...")

    # Test with auto-detection (should detect Qwen as an XML format model)
    agent = Agent(
        instructions="You are a helpful assistant",
        llm="openrouter/qwen/qwen-2.5-7b-instruct",
        tools=[get_weather],
        verbose=True
    )

    print("Created agent with Qwen model...")

    # Get the LLM instance directly from the agent
    llm_instance = agent.llm_instance  # This should be the LLM object
    print(f"XML tool format supported: {llm_instance._supports_xml_tool_format()}")

    # Test the tool call path without actually making an API request;
    # we only verify that the parameters are built correctly
    test_tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get weather information for a city",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city": {
                            "type": "string",
                            "description": "The city name"
                        }
                    },
                    "required": ["city"]
                }
            }
        }
    ]

    # Test _build_completion_params
    params = llm_instance._build_completion_params(
        messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
        tools=test_tools,
        temperature=0.2
    )

    print("\n=== Completion Parameters ===")
    print(f"Model: {params.get('model')}")
    print(f"Tools included: {'tools' in params}")
    print(f"Tool choice included: {'tool_choice' in params}")

    # Test _build_messages
    messages, original = llm_instance._build_messages(
        prompt="What's the weather in Tokyo?",
        system_prompt="You are a helpful assistant",
        tools=test_tools
    )

    print("\n=== System Message ===")
    for msg in messages:
        if msg['role'] == 'system':
            print(msg['content'])
            break

    print("\n✅ Test completed successfully!")
    print("Key improvements:")
    print("- Tools parameter is removed for XML format models")
    print("- Tool descriptions are added to system prompt")
    print("- XML tool call format instructions are included")

if __name__ == "__main__":
    main()
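As written, the script is run manually with `python test_openrouter_xml_fix.py` and its output checked by eye — a limitation the review comment below addresses.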
Comment on lines +16 to +87
Contributor

high

The current test script relies on manual inspection of the output. Convert it into an automated test with assertions to prevent future regressions and facilitate integration with testing frameworks like pytest.

Here's a suggested structure for an automated test:

import pytest
from praisonaiagents import Agent

def test_openrouter_xml_fix():
    agent = Agent(
        instructions="You are a helpful assistant",
        llm="openrouter/qwen/qwen-2.5-7b-instruct",
        tools=[lambda city: f"The weather in {city} is sunny with 22°C"],
        verbose=False  # Set verbose to False to avoid printing during tests
    )

    llm_instance = agent.llm_instance
    assert llm_instance._supports_xml_tool_format(), "Qwen model should be detected as supporting XML tool format"

    test_tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get weather information for a city",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city": {
                            "type": "string",
                            "description": "The city name"
                        }
                    },
                    "required": ["city"]
                }
            }
        }
    ]

    params = llm_instance._build_completion_params(
        messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
        tools=test_tools,
        temperature=0.2
    )

    assert 'tools' not in params, "Tools parameter should be removed for XML models"
    assert 'tool_choice' not in params, "Tool choice parameter should be removed for XML models"

    messages, _ = llm_instance._build_messages(
        prompt="What's the weather in Tokyo?",
        system_prompt="You are a helpful assistant",
        tools=test_tools
    )

    system_message = next((m['content'] for m in messages if m['role'] == 'system'), None)

    assert system_message is not None, "System message should be present"
    assert "You have access to the following tools:" in system_message
    assert "- get_weather: Get weather information for a city" in system_message
    assert "<tool_call>" in system_message