Skip to content

Commit 706c89b

Browse files
authored
Changes to adapt to openai-agents version 0.2.11 (#1092)
* Changes to adapt to openai-agents version 0.2.11 * Upper bound openai-agents to 0.3
1 parent 101d89d commit 706c89b

File tree

6 files changed

+22
-24
lines changed

6 files changed

+22
-24
lines changed

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ opentelemetry = [
2626
]
2727
pydantic = ["pydantic>=2.0.0,<3"]
2828
openai-agents = [
29-
"openai-agents>=0.2.3,<=0.2.9", # 0.2.10 doesn't work: https://github.com/openai/openai-agents-python/issues/1639
29+
"openai-agents>=0.2.11,<0.3",
3030
"eval-type-backport>=0.2.2; python_version < '3.10'"
3131
]
3232

@@ -57,7 +57,7 @@ dev = [
5757
"pytest-cov>=6.1.1",
5858
"httpx>=0.28.1",
5959
"pytest-pretty>=1.3.0",
60-
"openai-agents[litellm]>=0.2.3,<=0.2.9", # 0.2.10 doesn't work: https://github.com/openai/openai-agents-python/issues/1639
60+
"openai-agents[litellm]>=0.2.11,<0.3"
6161
]
6262

6363
[tool.poe.tasks]

temporalio/contrib/openai_agents/_invoke_model_activity.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,7 @@ class ActivityModelInput(TypedDict, total=False):
144144
handoffs: list[HandoffInput]
145145
tracing: Required[ModelTracingInput]
146146
previous_response_id: Optional[str]
147+
conversation_id: Optional[str]
147148
prompt: Optional[Any]
148149

149150

@@ -226,6 +227,7 @@ def make_tool(tool: ToolInput) -> Tool:
226227
handoffs=handoffs,
227228
tracing=ModelTracing(input["tracing"]),
228229
previous_response_id=input.get("previous_response_id"),
230+
conversation_id=input.get("conversation_id"),
229231
prompt=input.get("prompt"),
230232
)
231233
except APIStatusError as e:

temporalio/contrib/openai_agents/_temporal_model_stub.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@ async def get_response(
6868
tracing: ModelTracing,
6969
*,
7070
previous_response_id: Optional[str],
71+
conversation_id: Optional[str],
7172
prompt: Optional[ResponsePromptParam],
7273
) -> ModelResponse:
7374
def make_tool_info(tool: Tool) -> ToolInput:
@@ -134,6 +135,7 @@ def make_tool_info(tool: Tool) -> ToolInput:
134135
handoffs=handoff_infos,
135136
tracing=ModelTracingInput(tracing.value),
136137
previous_response_id=previous_response_id,
138+
conversation_id=conversation_id,
137139
prompt=prompt,
138140
)
139141

@@ -178,6 +180,7 @@ def stream_response(
178180
tracing: ModelTracing,
179181
*,
180182
previous_response_id: Optional[str],
183+
conversation_id: Optional[str],
181184
prompt: ResponsePromptParam | None,
182185
) -> AsyncIterator[TResponseStreamEvent]:
183186
raise NotImplementedError("Temporal model doesn't support streams yet")

temporalio/contrib/openai_agents/_temporal_openai_agents.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -126,9 +126,7 @@ async def get_response(
126126
output_schema: Union[AgentOutputSchemaBase, None],
127127
handoffs: list[Handoff],
128128
tracing: ModelTracing,
129-
*,
130-
previous_response_id: Union[str, None],
131-
prompt: Union[ResponsePromptParam, None] = None,
129+
**kwargs,
132130
) -> ModelResponse:
133131
"""Get a response from the model."""
134132
return self.fn()
@@ -142,9 +140,7 @@ def stream_response(
142140
output_schema: Optional[AgentOutputSchemaBase],
143141
handoffs: list[Handoff],
144142
tracing: ModelTracing,
145-
*,
146-
previous_response_id: Optional[str],
147-
prompt: Optional[ResponsePromptParam],
143+
**kwargs,
148144
) -> AsyncIterator[TResponseStreamEvent]:
149145
"""Get a streamed response from the model. Unimplemented."""
150146
raise NotImplementedError()

tests/contrib/openai_agents/test_openai.py

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1164,8 +1164,9 @@ async def get_response(
11641164
output_schema: Union[AgentOutputSchemaBase, None],
11651165
handoffs: list[Handoff],
11661166
tracing: ModelTracing,
1167-
previous_response_id: Union[str, None],
1168-
prompt: Union[ResponsePromptParam, None] = None,
1167+
previous_response_id: Optional[str] = None,
1168+
conversation_id: Optional[str] = None,
1169+
prompt: Optional[ResponsePromptParam] = None,
11691170
) -> ModelResponse:
11701171
if (
11711172
system_instructions
@@ -1553,9 +1554,7 @@ async def get_response(
15531554
output_schema: Union[AgentOutputSchemaBase, None],
15541555
handoffs: list[Handoff],
15551556
tracing: ModelTracing,
1556-
*,
1557-
previous_response_id: Union[str, None],
1558-
prompt: Union[ResponsePromptParam, None] = None,
1557+
**kwargs,
15591558
) -> ModelResponse:
15601559
activity.logger.info("Waiting")
15611560
await asyncio.sleep(1.0)
@@ -1571,9 +1570,7 @@ def stream_response(
15711570
output_schema: Optional[AgentOutputSchemaBase],
15721571
handoffs: list[Handoff],
15731572
tracing: ModelTracing,
1574-
*,
1575-
previous_response_id: Optional[str],
1576-
prompt: Optional[ResponsePromptParam],
1573+
**kwargs,
15771574
) -> AsyncIterator[TResponseStreamEvent]:
15781575
raise NotImplementedError()
15791576

uv.lock

Lines changed: 8 additions & 8 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)