Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 80de53e

Browse files
authored Apr 15, 2025
Extract chat completions conversion code into helper (openai#522)
Small refactor for rest of stack. --- [//]: # (BEGIN SAPLING FOOTER) * openai#524 * openai#523 * __->__ openai#522
1 parent ce1abe6 commit 80de53e

File tree

5 files changed

+530
-511
lines changed

5 files changed

+530
-511
lines changed
 
Lines changed: 466 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,466 @@
1+
from __future__ import annotations
2+
3+
import json
4+
from collections.abc import Iterable
5+
from typing import Any, Literal, cast
6+
7+
from openai import NOT_GIVEN, NotGiven
8+
from openai.types.chat import (
9+
ChatCompletionAssistantMessageParam,
10+
ChatCompletionContentPartImageParam,
11+
ChatCompletionContentPartParam,
12+
ChatCompletionContentPartTextParam,
13+
ChatCompletionDeveloperMessageParam,
14+
ChatCompletionMessage,
15+
ChatCompletionMessageParam,
16+
ChatCompletionMessageToolCallParam,
17+
ChatCompletionSystemMessageParam,
18+
ChatCompletionToolChoiceOptionParam,
19+
ChatCompletionToolMessageParam,
20+
ChatCompletionUserMessageParam,
21+
)
22+
from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
23+
from openai.types.chat.completion_create_params import ResponseFormat
24+
from openai.types.responses import (
25+
EasyInputMessageParam,
26+
ResponseFileSearchToolCallParam,
27+
ResponseFunctionToolCall,
28+
ResponseFunctionToolCallParam,
29+
ResponseInputContentParam,
30+
ResponseInputImageParam,
31+
ResponseInputTextParam,
32+
ResponseOutputMessage,
33+
ResponseOutputMessageParam,
34+
ResponseOutputRefusal,
35+
ResponseOutputText,
36+
)
37+
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
38+
39+
from ..agent_output import AgentOutputSchema
40+
from ..exceptions import AgentsException, UserError
41+
from ..handoffs import Handoff
42+
from ..items import TResponseInputItem, TResponseOutputItem
43+
from ..tool import FunctionTool, Tool
44+
from .fake_id import FAKE_RESPONSES_ID
45+
46+
47+
class Converter:
    """Translate between Responses-API items and Chat Completions messages.

    All methods are stateless classmethods. They let the agents runtime, which
    speaks the Responses item format, drive the Chat Completions API: input
    items become ``ChatCompletionMessageParam`` dicts, and completion output
    becomes Responses output items.
    """

    @classmethod
    def convert_tool_choice(
        cls, tool_choice: Literal["auto", "required", "none"] | str | None
    ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
        """Map the model-settings tool choice onto the Chat Completions param.

        ``None`` means "not specified" (``NOT_GIVEN``); the three literal modes
        pass through unchanged; any other string names a specific function the
        model must call.
        """
        if tool_choice is None:
            return NOT_GIVEN
        if tool_choice in ("auto", "required", "none"):
            return tool_choice
        # A concrete tool name: force the model to call that function.
        return {
            "type": "function",
            "function": {
                "name": tool_choice,
            },
        }

    @classmethod
    def convert_response_format(
        cls, final_output_schema: AgentOutputSchema | None
    ) -> ResponseFormat | NotGiven:
        """Build a ``json_schema`` response-format param from the agent's
        output schema, or ``NOT_GIVEN`` when the agent emits plain text."""
        if not final_output_schema or final_output_schema.is_plain_text():
            return NOT_GIVEN

        return {
            "type": "json_schema",
            "json_schema": {
                "name": "final_output",
                "strict": final_output_schema.strict_json_schema,
                "schema": final_output_schema.json_schema(),
            },
        }

    @classmethod
    def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
        """Convert a chat-completion assistant message into Responses output items.

        Text and refusal content become one ``ResponseOutputMessage`` (omitted
        entirely when empty); each tool call becomes a
        ``ResponseFunctionToolCall``. Audio output is rejected.
        """
        output_items: list[TResponseOutputItem] = []

        assistant_message = ResponseOutputMessage(
            id=FAKE_RESPONSES_ID,
            content=[],
            role="assistant",
            type="message",
            status="completed",
        )
        if message.content:
            assistant_message.content.append(
                ResponseOutputText(text=message.content, type="output_text", annotations=[])
            )
        if message.refusal:
            assistant_message.content.append(
                ResponseOutputRefusal(refusal=message.refusal, type="refusal")
            )
        if message.audio:
            raise AgentsException("Audio is not currently supported")

        # Only emit the message item when it actually carries content.
        if assistant_message.content:
            output_items.append(assistant_message)

        for tool_call in message.tool_calls or []:
            output_items.append(
                ResponseFunctionToolCall(
                    id=FAKE_RESPONSES_ID,
                    call_id=tool_call.id,
                    arguments=tool_call.function.arguments,
                    name=tool_call.function.name,
                    type="function_call",
                )
            )

        return output_items

    @classmethod
    def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None:
        """Return *item* as an ``EasyInputMessageParam`` if it has that shape,
        else ``None``.

        An easy input message is a dict with exactly the keys ``content`` and
        ``role``, where the role is one of the four chat roles.
        """
        if not isinstance(item, dict):
            return None

        # EasyInputMessageParam only has these two keys; anything else is some
        # other item type.
        if set(item.keys()) != {"content", "role"}:
            return None

        if item.get("role") not in ("user", "assistant", "system", "developer"):
            return None

        return cast("EasyInputMessageParam", item)

    @classmethod
    def maybe_input_message(cls, item: Any) -> Message | None:
        """Return *item* as an input ``Message`` if it is a non-assistant
        message dict, else ``None``."""
        looks_like_input = (
            isinstance(item, dict)
            and item.get("type") == "message"
            and item.get("role") in ("user", "system", "developer")
        )
        return cast("Message", item) if looks_like_input else None

    @classmethod
    def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None:
        """Return *item* as a file-search tool call if it is one, else ``None``."""
        if isinstance(item, dict) and item.get("type") == "file_search_call":
            return cast("ResponseFileSearchToolCallParam", item)
        return None

    @classmethod
    def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None:
        """Return *item* as a function tool call if it is one, else ``None``."""
        if isinstance(item, dict) and item.get("type") == "function_call":
            return cast("ResponseFunctionToolCallParam", item)
        return None

    @classmethod
    def maybe_function_tool_call_output(
        cls,
        item: Any,
    ) -> FunctionCallOutput | None:
        """Return *item* as a function-call output if it is one, else ``None``."""
        if isinstance(item, dict) and item.get("type") == "function_call_output":
            return cast("FunctionCallOutput", item)
        return None

    @classmethod
    def maybe_item_reference(cls, item: Any) -> ItemReference | None:
        """Return *item* as an item reference if it is one, else ``None``."""
        if isinstance(item, dict) and item.get("type") == "item_reference":
            return cast("ItemReference", item)
        return None

    @classmethod
    def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None:
        """Return *item* as a ``ResponseOutputMessageParam`` if it is an
        assistant message dict, else ``None``."""
        # ResponseOutputMessage is only used for messages with role assistant
        looks_like_output = (
            isinstance(item, dict)
            and item.get("type") == "message"
            and item.get("role") == "assistant"
        )
        return cast("ResponseOutputMessageParam", item) if looks_like_output else None

    @classmethod
    def extract_text_content(
        cls, content: str | Iterable[ResponseInputContentParam]
    ) -> str | list[ChatCompletionContentPartTextParam]:
        """Like :meth:`extract_all_content`, but keep only the text parts."""
        converted = cls.extract_all_content(content)
        if isinstance(converted, str):
            return converted
        return [
            cast("ChatCompletionContentPartTextParam", part)
            for part in converted
            if part.get("type") == "text"
        ]

    @classmethod
    def extract_all_content(
        cls, content: str | Iterable[ResponseInputContentParam]
    ) -> str | list[ChatCompletionContentPartParam]:
        """Convert Responses-style content parts into chat-completions parts.

        Plain strings pass through untouched. ``input_text`` becomes a ``text``
        part; ``input_image`` (URL form only) becomes an ``image_url`` part.
        File uploads and unrecognized part types raise ``UserError``.
        """
        if isinstance(content, str):
            return content

        parts: list[ChatCompletionContentPartParam] = []
        for part in content:
            part_type = part.get("type") if isinstance(part, dict) else None
            if part_type == "input_text":
                text_part = cast("ResponseInputTextParam", part)
                parts.append(
                    {
                        "type": "text",
                        "text": text_part["text"],
                    }
                )
            elif part_type == "input_image":
                image_part = cast("ResponseInputImageParam", part)
                # Only the URL form is representable in chat completions.
                if not image_part.get("image_url"):
                    raise UserError(
                        f"Only image URLs are supported for input_image {image_part}"
                    )
                parts.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_part["image_url"],
                            "detail": image_part["detail"],
                        },
                    }
                )
            elif part_type == "input_file":
                raise UserError(f"File uploads are not supported for chat completions {part}")
            else:
                raise UserError(f"Unknown content: {part}")
        return parts

    @classmethod
    def items_to_messages(
        cls,
        items: str | Iterable[TResponseInputItem],
    ) -> list[ChatCompletionMessageParam]:
        """
        Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.

        Rules:
        - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
        - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
        - EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam
        - InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam
        - response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam
        - tool calls get attached to the *current* assistant message, or create one if none.
        - tool outputs => ChatCompletionToolMessageParam
        """
        # A bare string is shorthand for a single user message.
        if isinstance(items, str):
            return [
                {
                    "role": "user",
                    "content": items,
                }
            ]

        messages: list[ChatCompletionMessageParam] = []
        pending_assistant: ChatCompletionAssistantMessageParam | None = None

        def _flush() -> None:
            # Move the in-progress assistant message (if any) onto the result.
            nonlocal pending_assistant
            if pending_assistant is None:
                return
            # The API doesn't support empty arrays for tool_calls
            if not pending_assistant.get("tool_calls"):
                del pending_assistant["tool_calls"]
            messages.append(pending_assistant)
            pending_assistant = None

        def _current_assistant() -> ChatCompletionAssistantMessageParam:
            # Fetch the in-progress assistant message, starting one if needed.
            nonlocal pending_assistant
            if pending_assistant is None:
                pending_assistant = {"role": "assistant", "tool_calls": []}
            return pending_assistant

        for item in items:
            # 1) Easy input message: a plain {"role", "content"} dict.
            if easy_msg := cls.maybe_easy_input_message(item):
                role = easy_msg["role"]
                content = easy_msg["content"]
                _flush()

                if role == "user":
                    messages.append(
                        {
                            "role": "user",
                            "content": cls.extract_all_content(content),
                        }
                    )
                elif role == "system":
                    messages.append(
                        {
                            "role": "system",
                            "content": cls.extract_text_content(content),
                        }
                    )
                elif role == "developer":
                    messages.append(
                        {
                            "role": "developer",
                            "content": cls.extract_text_content(content),
                        }
                    )
                elif role == "assistant":
                    messages.append(
                        {
                            "role": "assistant",
                            "content": cls.extract_text_content(content),
                        }
                    )
                else:
                    raise UserError(f"Unexpected role in easy_input_message: {role}")

            # 2) Full input message (type == "message", non-assistant role).
            elif in_msg := cls.maybe_input_message(item):
                role = in_msg["role"]
                content = in_msg["content"]
                _flush()

                if role == "user":
                    messages.append(
                        {
                            "role": "user",
                            "content": cls.extract_all_content(content),
                        }
                    )
                elif role == "system":
                    messages.append(
                        {
                            "role": "system",
                            "content": cls.extract_text_content(content),
                        }
                    )
                elif role == "developer":
                    messages.append(
                        {
                            "role": "developer",
                            "content": cls.extract_text_content(content),
                        }
                    )
                else:
                    raise UserError(f"Unexpected role in input_message: {role}")

            # 3) Response output message => start a fresh assistant message.
            elif resp_msg := cls.maybe_response_output_message(item):
                _flush()
                new_assistant: ChatCompletionAssistantMessageParam = {"role": "assistant"}

                text_segments: list[str] = []
                for part in resp_msg["content"]:
                    if part["type"] == "output_text":
                        text_segments.append(part["text"])
                    elif part["type"] == "refusal":
                        new_assistant["refusal"] = part["refusal"]
                    elif part["type"] == "output_audio":
                        # Can't handle this, b/c chat completions expects an ID which we dont have
                        raise UserError(
                            f"Only audio IDs are supported for chat completions, but got: {part}"
                        )
                    else:
                        raise UserError(f"Unknown content type in ResponseOutputMessage: {part}")

                if text_segments:
                    new_assistant["content"] = "\n".join(text_segments)

                new_assistant["tool_calls"] = []
                pending_assistant = new_assistant

            # 4) File-search call => encoded as a function-style tool call on
            #    the current assistant message.
            elif file_search := cls.maybe_file_search_call(item):
                assistant = _current_assistant()
                calls = list(assistant.get("tool_calls", []))
                calls.append(
                    {
                        "id": file_search["id"],
                        "type": "function",
                        "function": {
                            "name": "file_search_call",
                            "arguments": json.dumps(
                                {
                                    "queries": file_search.get("queries", []),
                                    "status": file_search.get("status"),
                                }
                            ),
                        },
                    }
                )
                assistant["tool_calls"] = calls

            # Function tool call => attach to the current assistant message.
            elif func_call := cls.maybe_function_tool_call(item):
                assistant = _current_assistant()
                calls = list(assistant.get("tool_calls", []))
                calls.append(
                    {
                        "id": func_call["call_id"],
                        "type": "function",
                        "function": {
                            "name": func_call["name"],
                            # The API rejects empty argument strings.
                            "arguments": func_call["arguments"] or "{}",
                        },
                    }
                )
                assistant["tool_calls"] = calls

            # 5) Function call output => tool message.
            elif func_output := cls.maybe_function_tool_call_output(item):
                _flush()
                messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": func_output["call_id"],
                        "content": func_output["output"],
                    }
                )

            # 6) Item references cannot be resolved here.
            elif item_ref := cls.maybe_item_reference(item):
                raise UserError(
                    f"Encountered an item_reference, which is not supported: {item_ref}"
                )

            # 7) Anything unrecognized is a hard error.
            else:
                raise UserError(f"Unhandled item type or structure: {item}")

        _flush()
        return messages

    @classmethod
    def tool_to_openai(cls, tool: Tool) -> ChatCompletionToolParam:
        """Convert a ``FunctionTool`` into a chat-completions function tool.

        Hosted tools (file search, web search, ...) have no chat-completions
        equivalent and are rejected.
        """
        if not isinstance(tool, FunctionTool):
            raise UserError(
                f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
                f"{type(tool)}, tool: {tool}"
            )
        return {
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description or "",
                "parameters": tool.params_json_schema,
            },
        }

    @classmethod
    def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam:
        """Expose a handoff to the model as a callable function tool."""
        return {
            "type": "function",
            "function": {
                "name": handoff.tool_name,
                "description": handoff.tool_description,
                "parameters": handoff.input_json_schema,
            },
        }

‎src/agents/models/openai_chatcompletions.py

Lines changed: 15 additions & 462 deletions
Original file line numberDiff line numberDiff line change
@@ -3,71 +3,46 @@
33
import dataclasses
44
import json
55
import time
6-
from collections.abc import AsyncIterator, Iterable
6+
from collections.abc import AsyncIterator
77
from dataclasses import dataclass, field
88
from typing import TYPE_CHECKING, Any, Literal, cast, overload
99

10-
from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven
10+
from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
1111
from openai.types import ChatModel
12-
from openai.types.chat import (
13-
ChatCompletion,
14-
ChatCompletionAssistantMessageParam,
15-
ChatCompletionChunk,
16-
ChatCompletionContentPartImageParam,
17-
ChatCompletionContentPartParam,
18-
ChatCompletionContentPartTextParam,
19-
ChatCompletionDeveloperMessageParam,
20-
ChatCompletionMessage,
21-
ChatCompletionMessageParam,
22-
ChatCompletionMessageToolCallParam,
23-
ChatCompletionSystemMessageParam,
24-
ChatCompletionToolChoiceOptionParam,
25-
ChatCompletionToolMessageParam,
26-
ChatCompletionUserMessageParam,
27-
)
28-
from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
29-
from openai.types.chat.completion_create_params import ResponseFormat
12+
from openai.types.chat import ChatCompletion, ChatCompletionChunk
3013
from openai.types.completion_usage import CompletionUsage
3114
from openai.types.responses import (
32-
EasyInputMessageParam,
3315
Response,
3416
ResponseCompletedEvent,
3517
ResponseContentPartAddedEvent,
3618
ResponseContentPartDoneEvent,
3719
ResponseCreatedEvent,
38-
ResponseFileSearchToolCallParam,
3920
ResponseFunctionCallArgumentsDeltaEvent,
4021
ResponseFunctionToolCall,
41-
ResponseFunctionToolCallParam,
42-
ResponseInputContentParam,
43-
ResponseInputImageParam,
44-
ResponseInputTextParam,
4522
ResponseOutputItem,
4623
ResponseOutputItemAddedEvent,
4724
ResponseOutputItemDoneEvent,
4825
ResponseOutputMessage,
49-
ResponseOutputMessageParam,
5026
ResponseOutputRefusal,
5127
ResponseOutputText,
5228
ResponseRefusalDeltaEvent,
5329
ResponseTextDeltaEvent,
5430
ResponseUsage,
5531
)
56-
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
5732
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
5833

5934
from .. import _debug
6035
from ..agent_output import AgentOutputSchema
61-
from ..exceptions import AgentsException, UserError
6236
from ..handoffs import Handoff
63-
from ..items import ModelResponse, TResponseInputItem, TResponseOutputItem, TResponseStreamEvent
37+
from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent
6438
from ..logger import logger
65-
from ..tool import FunctionTool, Tool
39+
from ..tool import Tool
6640
from ..tracing import generation_span
6741
from ..tracing.span_data import GenerationSpanData
6842
from ..tracing.spans import Span
6943
from ..usage import Usage
7044
from ..version import __version__
45+
from .chatcmpl_converter import Converter
7146
from .fake_id import FAKE_RESPONSES_ID
7247
from .interface import Model, ModelTracing
7348

@@ -152,7 +127,7 @@ async def get_response(
152127
"output_tokens": usage.output_tokens,
153128
}
154129

155-
items = _Converter.message_to_output_items(response.choices[0].message)
130+
items = Converter.message_to_output_items(response.choices[0].message)
156131

157132
return ModelResponse(
158133
output=items,
@@ -486,7 +461,7 @@ async def _fetch_response(
486461
tracing: ModelTracing,
487462
stream: bool = False,
488463
) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
489-
converted_messages = _Converter.items_to_messages(input)
464+
converted_messages = Converter.items_to_messages(input)
490465

491466
if system_instructions:
492467
converted_messages.insert(
@@ -506,13 +481,13 @@ async def _fetch_response(
506481
if model_settings.parallel_tool_calls is False
507482
else NOT_GIVEN
508483
)
509-
tool_choice = _Converter.convert_tool_choice(model_settings.tool_choice)
510-
response_format = _Converter.convert_response_format(output_schema)
484+
tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
485+
response_format = Converter.convert_response_format(output_schema)
511486

512-
converted_tools = [ToolConverter.to_openai(tool) for tool in tools] if tools else []
487+
converted_tools = [Converter.tool_to_openai(tool) for tool in tools] if tools else []
513488

514489
for handoff in handoffs:
515-
converted_tools.append(ToolConverter.convert_handoff_tool(handoff))
490+
converted_tools.append(Converter.convert_handoff_tool(handoff))
516491

517492
if _debug.DONT_LOG_MODEL_DATA:
518493
logger.debug("Calling LLM")
@@ -526,9 +501,9 @@ async def _fetch_response(
526501
)
527502

528503
reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
529-
store = _Converter.get_store_param(self._get_client(), model_settings)
504+
store = _Helpers.get_store_param(self._get_client(), model_settings)
530505

531-
stream_options = _Converter.get_stream_options_param(
506+
stream_options = _Helpers.get_stream_options_param(
532507
self._get_client(), model_settings, stream=stream
533508
)
534509

@@ -580,7 +555,7 @@ def _get_client(self) -> AsyncOpenAI:
580555
return self._client
581556

582557

583-
class _Converter:
558+
class _Helpers:
584559
@classmethod
585560
def is_openai(cls, client: AsyncOpenAI):
586561
return str(client.base_url).startswith("https://api.openai.com")
@@ -606,425 +581,3 @@ def get_stream_options_param(
606581
)
607582
stream_options = {"include_usage": include_usage} if include_usage is not None else None
608583
return stream_options
609-
610-
@classmethod
611-
def convert_tool_choice(
612-
cls, tool_choice: Literal["auto", "required", "none"] | str | None
613-
) -> ChatCompletionToolChoiceOptionParam | NotGiven:
614-
if tool_choice is None:
615-
return NOT_GIVEN
616-
elif tool_choice == "auto":
617-
return "auto"
618-
elif tool_choice == "required":
619-
return "required"
620-
elif tool_choice == "none":
621-
return "none"
622-
else:
623-
return {
624-
"type": "function",
625-
"function": {
626-
"name": tool_choice,
627-
},
628-
}
629-
630-
@classmethod
631-
def convert_response_format(
632-
cls, final_output_schema: AgentOutputSchema | None
633-
) -> ResponseFormat | NotGiven:
634-
if not final_output_schema or final_output_schema.is_plain_text():
635-
return NOT_GIVEN
636-
637-
return {
638-
"type": "json_schema",
639-
"json_schema": {
640-
"name": "final_output",
641-
"strict": final_output_schema.strict_json_schema,
642-
"schema": final_output_schema.json_schema(),
643-
},
644-
}
645-
646-
@classmethod
647-
def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
648-
items: list[TResponseOutputItem] = []
649-
650-
message_item = ResponseOutputMessage(
651-
id=FAKE_RESPONSES_ID,
652-
content=[],
653-
role="assistant",
654-
type="message",
655-
status="completed",
656-
)
657-
if message.content:
658-
message_item.content.append(
659-
ResponseOutputText(text=message.content, type="output_text", annotations=[])
660-
)
661-
if message.refusal:
662-
message_item.content.append(
663-
ResponseOutputRefusal(refusal=message.refusal, type="refusal")
664-
)
665-
if message.audio:
666-
raise AgentsException("Audio is not currently supported")
667-
668-
if message_item.content:
669-
items.append(message_item)
670-
671-
if message.tool_calls:
672-
for tool_call in message.tool_calls:
673-
items.append(
674-
ResponseFunctionToolCall(
675-
id=FAKE_RESPONSES_ID,
676-
call_id=tool_call.id,
677-
arguments=tool_call.function.arguments,
678-
name=tool_call.function.name,
679-
type="function_call",
680-
)
681-
)
682-
683-
return items
684-
685-
@classmethod
686-
def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None:
687-
if not isinstance(item, dict):
688-
return None
689-
690-
keys = item.keys()
691-
# EasyInputMessageParam only has these two keys
692-
if keys != {"content", "role"}:
693-
return None
694-
695-
role = item.get("role", None)
696-
if role not in ("user", "assistant", "system", "developer"):
697-
return None
698-
699-
if "content" not in item:
700-
return None
701-
702-
return cast(EasyInputMessageParam, item)
703-
704-
@classmethod
705-
def maybe_input_message(cls, item: Any) -> Message | None:
706-
if (
707-
isinstance(item, dict)
708-
and item.get("type") == "message"
709-
and item.get("role")
710-
in (
711-
"user",
712-
"system",
713-
"developer",
714-
)
715-
):
716-
return cast(Message, item)
717-
718-
return None
719-
720-
@classmethod
721-
def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None:
722-
if isinstance(item, dict) and item.get("type") == "file_search_call":
723-
return cast(ResponseFileSearchToolCallParam, item)
724-
return None
725-
726-
@classmethod
727-
def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None:
728-
if isinstance(item, dict) and item.get("type") == "function_call":
729-
return cast(ResponseFunctionToolCallParam, item)
730-
return None
731-
732-
@classmethod
733-
def maybe_function_tool_call_output(
734-
cls,
735-
item: Any,
736-
) -> FunctionCallOutput | None:
737-
if isinstance(item, dict) and item.get("type") == "function_call_output":
738-
return cast(FunctionCallOutput, item)
739-
return None
740-
741-
@classmethod
742-
def maybe_item_reference(cls, item: Any) -> ItemReference | None:
743-
if isinstance(item, dict) and item.get("type") == "item_reference":
744-
return cast(ItemReference, item)
745-
return None
746-
747-
@classmethod
748-
def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None:
749-
# ResponseOutputMessage is only used for messages with role assistant
750-
if (
751-
isinstance(item, dict)
752-
and item.get("type") == "message"
753-
and item.get("role") == "assistant"
754-
):
755-
return cast(ResponseOutputMessageParam, item)
756-
return None
757-
758-
@classmethod
759-
def extract_text_content(
760-
cls, content: str | Iterable[ResponseInputContentParam]
761-
) -> str | list[ChatCompletionContentPartTextParam]:
762-
all_content = cls.extract_all_content(content)
763-
if isinstance(all_content, str):
764-
return all_content
765-
out: list[ChatCompletionContentPartTextParam] = []
766-
for c in all_content:
767-
if c.get("type") == "text":
768-
out.append(cast(ChatCompletionContentPartTextParam, c))
769-
return out
770-
771-
@classmethod
772-
def extract_all_content(
773-
cls, content: str | Iterable[ResponseInputContentParam]
774-
) -> str | list[ChatCompletionContentPartParam]:
775-
if isinstance(content, str):
776-
return content
777-
out: list[ChatCompletionContentPartParam] = []
778-
779-
for c in content:
780-
if isinstance(c, dict) and c.get("type") == "input_text":
781-
casted_text_param = cast(ResponseInputTextParam, c)
782-
out.append(
783-
ChatCompletionContentPartTextParam(
784-
type="text",
785-
text=casted_text_param["text"],
786-
)
787-
)
788-
elif isinstance(c, dict) and c.get("type") == "input_image":
789-
casted_image_param = cast(ResponseInputImageParam, c)
790-
if "image_url" not in casted_image_param or not casted_image_param["image_url"]:
791-
raise UserError(
792-
f"Only image URLs are supported for input_image {casted_image_param}"
793-
)
794-
out.append(
795-
ChatCompletionContentPartImageParam(
796-
type="image_url",
797-
image_url={
798-
"url": casted_image_param["image_url"],
799-
"detail": casted_image_param["detail"],
800-
},
801-
)
802-
)
803-
elif isinstance(c, dict) and c.get("type") == "input_file":
804-
raise UserError(f"File uploads are not supported for chat completions {c}")
805-
else:
806-
raise UserError(f"Unknown content: {c}")
807-
return out
808-
809-
@classmethod
810-
def items_to_messages(
811-
cls,
812-
items: str | Iterable[TResponseInputItem],
813-
) -> list[ChatCompletionMessageParam]:
814-
"""
815-
Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.
816-
817-
Rules:
818-
- EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
819-
- EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
820-
- EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam
821-
- InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam
822-
- response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam
823-
- tool calls get attached to the *current* assistant message, or create one if none.
824-
- tool outputs => ChatCompletionToolMessageParam
825-
"""
826-
827-
if isinstance(items, str):
828-
return [
829-
ChatCompletionUserMessageParam(
830-
role="user",
831-
content=items,
832-
)
833-
]
834-
835-
result: list[ChatCompletionMessageParam] = []
836-
current_assistant_msg: ChatCompletionAssistantMessageParam | None = None
837-
838-
def flush_assistant_message() -> None:
839-
nonlocal current_assistant_msg
840-
if current_assistant_msg is not None:
841-
# The API doesn't support empty arrays for tool_calls
842-
if not current_assistant_msg.get("tool_calls"):
843-
del current_assistant_msg["tool_calls"]
844-
result.append(current_assistant_msg)
845-
current_assistant_msg = None
846-
847-
def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
848-
nonlocal current_assistant_msg
849-
if current_assistant_msg is None:
850-
current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
851-
current_assistant_msg["tool_calls"] = []
852-
return current_assistant_msg
853-
854-
for item in items:
855-
# 1) Check easy input message
856-
if easy_msg := cls.maybe_easy_input_message(item):
857-
role = easy_msg["role"]
858-
content = easy_msg["content"]
859-
860-
if role == "user":
861-
flush_assistant_message()
862-
msg_user: ChatCompletionUserMessageParam = {
863-
"role": "user",
864-
"content": cls.extract_all_content(content),
865-
}
866-
result.append(msg_user)
867-
elif role == "system":
868-
flush_assistant_message()
869-
msg_system: ChatCompletionSystemMessageParam = {
870-
"role": "system",
871-
"content": cls.extract_text_content(content),
872-
}
873-
result.append(msg_system)
874-
elif role == "developer":
875-
flush_assistant_message()
876-
msg_developer: ChatCompletionDeveloperMessageParam = {
877-
"role": "developer",
878-
"content": cls.extract_text_content(content),
879-
}
880-
result.append(msg_developer)
881-
elif role == "assistant":
882-
flush_assistant_message()
883-
msg_assistant: ChatCompletionAssistantMessageParam = {
884-
"role": "assistant",
885-
"content": cls.extract_text_content(content),
886-
}
887-
result.append(msg_assistant)
888-
else:
889-
raise UserError(f"Unexpected role in easy_input_message: {role}")
890-
891-
# 2) Check input message
892-
elif in_msg := cls.maybe_input_message(item):
893-
role = in_msg["role"]
894-
content = in_msg["content"]
895-
flush_assistant_message()
896-
897-
if role == "user":
898-
msg_user = {
899-
"role": "user",
900-
"content": cls.extract_all_content(content),
901-
}
902-
result.append(msg_user)
903-
elif role == "system":
904-
msg_system = {
905-
"role": "system",
906-
"content": cls.extract_text_content(content),
907-
}
908-
result.append(msg_system)
909-
elif role == "developer":
910-
msg_developer = {
911-
"role": "developer",
912-
"content": cls.extract_text_content(content),
913-
}
914-
result.append(msg_developer)
915-
else:
916-
raise UserError(f"Unexpected role in input_message: {role}")
917-
918-
# 3) response output message => assistant
919-
elif resp_msg := cls.maybe_response_output_message(item):
920-
flush_assistant_message()
921-
new_asst = ChatCompletionAssistantMessageParam(role="assistant")
922-
contents = resp_msg["content"]
923-
924-
text_segments = []
925-
for c in contents:
926-
if c["type"] == "output_text":
927-
text_segments.append(c["text"])
928-
elif c["type"] == "refusal":
929-
new_asst["refusal"] = c["refusal"]
930-
elif c["type"] == "output_audio":
931-
# Can't handle this, b/c chat completions expects an ID which we dont have
932-
raise UserError(
933-
f"Only audio IDs are supported for chat completions, but got: {c}"
934-
)
935-
else:
936-
raise UserError(f"Unknown content type in ResponseOutputMessage: {c}")
937-
938-
if text_segments:
939-
combined = "\n".join(text_segments)
940-
new_asst["content"] = combined
941-
942-
new_asst["tool_calls"] = []
943-
current_assistant_msg = new_asst
944-
945-
# 4) function/file-search calls => attach to assistant
946-
elif file_search := cls.maybe_file_search_call(item):
947-
asst = ensure_assistant_message()
948-
tool_calls = list(asst.get("tool_calls", []))
949-
new_tool_call = ChatCompletionMessageToolCallParam(
950-
id=file_search["id"],
951-
type="function",
952-
function={
953-
"name": "file_search_call",
954-
"arguments": json.dumps(
955-
{
956-
"queries": file_search.get("queries", []),
957-
"status": file_search.get("status"),
958-
}
959-
),
960-
},
961-
)
962-
tool_calls.append(new_tool_call)
963-
asst["tool_calls"] = tool_calls
964-
965-
elif func_call := cls.maybe_function_tool_call(item):
966-
asst = ensure_assistant_message()
967-
tool_calls = list(asst.get("tool_calls", []))
968-
arguments = func_call["arguments"] if func_call["arguments"] else "{}"
969-
new_tool_call = ChatCompletionMessageToolCallParam(
970-
id=func_call["call_id"],
971-
type="function",
972-
function={
973-
"name": func_call["name"],
974-
"arguments": arguments,
975-
},
976-
)
977-
tool_calls.append(new_tool_call)
978-
asst["tool_calls"] = tool_calls
979-
# 5) function call output => tool message
980-
elif func_output := cls.maybe_function_tool_call_output(item):
981-
flush_assistant_message()
982-
msg: ChatCompletionToolMessageParam = {
983-
"role": "tool",
984-
"tool_call_id": func_output["call_id"],
985-
"content": func_output["output"],
986-
}
987-
result.append(msg)
988-
989-
# 6) item reference => handle or raise
990-
elif item_ref := cls.maybe_item_reference(item):
991-
raise UserError(
992-
f"Encountered an item_reference, which is not supported: {item_ref}"
993-
)
994-
995-
# 7) If we haven't recognized it => fail or ignore
996-
else:
997-
raise UserError(f"Unhandled item type or structure: {item}")
998-
999-
flush_assistant_message()
1000-
return result
1001-
1002-
1003-
class ToolConverter:
    """Translates agent tool definitions into Chat Completions tool params."""

    @classmethod
    def to_openai(cls, tool: Tool) -> ChatCompletionToolParam:
        """Convert a ``FunctionTool`` into the Chat Completions tool schema.

        Raises:
            UserError: if *tool* is a hosted tool (those only exist on the
                Responses API and cannot be expressed for Chat Completions).
        """
        if not isinstance(tool, FunctionTool):
            # Hosted tools (web search, file search, ...) have no Chat
            # Completions equivalent, so fail loudly rather than drop them.
            raise UserError(
                f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
                f"{type(tool)}, tool: {tool}"
            )

        function_spec = {
            "name": tool.name,
            "description": tool.description or "",
            "parameters": tool.params_json_schema,
        }
        return {"type": "function", "function": function_spec}

    @classmethod
    def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam:
        """Expose a handoff as a callable function tool for the model."""
        handoff_spec = {
            "name": handoff.tool_name,
            "description": handoff.tool_description,
            "parameters": handoff.input_json_schema,
        }
        return {"type": "function", "function": handoff_spec}

‎tests/test_openai_chatcompletions.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
generation_span,
3232
)
3333
from agents.models.fake_id import FAKE_RESPONSES_ID
34-
from agents.models.openai_chatcompletions import _Converter
34+
from agents.models.openai_chatcompletions import _Helpers
3535

3636

3737
@pytest.mark.allow_call_model_methods
@@ -301,32 +301,32 @@ def test_store_param():
301301

302302
model_settings = ModelSettings()
303303
client = AsyncOpenAI()
304-
assert _Converter.get_store_param(client, model_settings) is True, (
304+
assert _Helpers.get_store_param(client, model_settings) is True, (
305305
"Should default to True for OpenAI API calls"
306306
)
307307

308308
model_settings = ModelSettings(store=False)
309-
assert _Converter.get_store_param(client, model_settings) is False, (
309+
assert _Helpers.get_store_param(client, model_settings) is False, (
310310
"Should respect explicitly set store=False"
311311
)
312312

313313
model_settings = ModelSettings(store=True)
314-
assert _Converter.get_store_param(client, model_settings) is True, (
314+
assert _Helpers.get_store_param(client, model_settings) is True, (
315315
"Should respect explicitly set store=True"
316316
)
317317

318318
client = AsyncOpenAI(base_url="http://www.notopenai.com")
319319
model_settings = ModelSettings()
320-
assert _Converter.get_store_param(client, model_settings) is None, (
320+
assert _Helpers.get_store_param(client, model_settings) is None, (
321321
"Should default to None for non-OpenAI API calls"
322322
)
323323

324324
model_settings = ModelSettings(store=False)
325-
assert _Converter.get_store_param(client, model_settings) is False, (
325+
assert _Helpers.get_store_param(client, model_settings) is False, (
326326
"Should respect explicitly set store=False"
327327
)
328328

329329
model_settings = ModelSettings(store=True)
330-
assert _Converter.get_store_param(client, model_settings) is True, (
330+
assert _Helpers.get_store_param(client, model_settings) is True, (
331331
"Should respect explicitly set store=True"
332332
)

‎tests/test_openai_chatcompletions_converter.py

Lines changed: 37 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -4,18 +4,18 @@
44
# See LICENSE file in the project root for full license information.
55

66
"""
7-
Unit tests for the internal `_Converter` class defined in
7+
Unit tests for the internal `Converter` class defined in
88
`agents.models.openai_chatcompletions`. The converter is responsible for
99
translating between internal "item" structures (e.g., `ResponseOutputMessage`
1010
and related types from `openai.types.responses`) and the ChatCompletion message
1111
structures defined by the OpenAI client library.
1212
1313
These tests exercise both conversion directions:
1414
15-
- `_Converter.message_to_output_items` turns a `ChatCompletionMessage` (as
15+
- `Converter.message_to_output_items` turns a `ChatCompletionMessage` (as
1616
returned by the OpenAI API) into a list of `ResponseOutputItem` instances.
1717
18-
- `_Converter.items_to_messages` takes in either a simple string prompt, or a
18+
- `Converter.items_to_messages` takes in either a simple string prompt, or a
1919
list of input/output items such as `ResponseOutputMessage` and
2020
`ResponseFunctionToolCallParam` dicts, and constructs a list of
2121
`ChatCompletionMessageParam` dicts suitable for sending back to the API.
@@ -41,8 +41,8 @@
4141
from agents.agent_output import AgentOutputSchema
4242
from agents.exceptions import UserError
4343
from agents.items import TResponseInputItem
44+
from agents.models.chatcmpl_converter import Converter
4445
from agents.models.fake_id import FAKE_RESPONSES_ID
45-
from agents.models.openai_chatcompletions import _Converter
4646

4747

4848
def test_message_to_output_items_with_text_only():
@@ -51,7 +51,7 @@ def test_message_to_output_items_with_text_only():
5151
into a single ResponseOutputMessage containing one ResponseOutputText.
5252
"""
5353
msg = ChatCompletionMessage(role="assistant", content="Hello")
54-
items = _Converter.message_to_output_items(msg)
54+
items = Converter.message_to_output_items(msg)
5555
# Expect exactly one output item (the message)
5656
assert len(items) == 1
5757
message_item = cast(ResponseOutputMessage, items[0])
@@ -72,7 +72,7 @@ def test_message_to_output_items_with_refusal():
7272
with a ResponseOutputRefusal content part.
7373
"""
7474
msg = ChatCompletionMessage(role="assistant", refusal="I'm sorry")
75-
items = _Converter.message_to_output_items(msg)
75+
items = Converter.message_to_output_items(msg)
7676
assert len(items) == 1
7777
message_item = cast(ResponseOutputMessage, items[0])
7878
assert len(message_item.content) == 1
@@ -93,7 +93,7 @@ def test_message_to_output_items_with_tool_call():
9393
function=Function(name="myfn", arguments='{"x":1}'),
9494
)
9595
msg = ChatCompletionMessage(role="assistant", content="Hi", tool_calls=[tool_call])
96-
items = _Converter.message_to_output_items(msg)
96+
items = Converter.message_to_output_items(msg)
9797
# Should produce a message item followed by one function tool call item
9898
assert len(items) == 2
9999
message_item = cast(ResponseOutputMessage, items[0])
@@ -111,7 +111,7 @@ def test_items_to_messages_with_string_user_content():
111111
A simple string as the items argument should be converted into a user
112112
message param dict with the same content.
113113
"""
114-
result = _Converter.items_to_messages("Ask me anything")
114+
result = Converter.items_to_messages("Ask me anything")
115115
assert isinstance(result, list)
116116
assert len(result) == 1
117117
msg = result[0]
@@ -130,7 +130,7 @@ def test_items_to_messages_with_easy_input_message():
130130
"content": "How are you?",
131131
}
132132
]
133-
messages = _Converter.items_to_messages(items)
133+
messages = Converter.items_to_messages(items)
134134
assert len(messages) == 1
135135
out = messages[0]
136136
assert out["role"] == "user"
@@ -174,7 +174,7 @@ def test_items_to_messages_with_output_message_and_function_call():
174174
resp_msg.model_dump(), # type:ignore
175175
func_item,
176176
]
177-
messages = _Converter.items_to_messages(items)
177+
messages = Converter.items_to_messages(items)
178178
# Should return a single assistant message
179179
assert len(messages) == 1
180180
assistant = messages[0]
@@ -197,37 +197,37 @@ def test_items_to_messages_with_output_message_and_function_call():
197197

198198
def test_convert_tool_choice_handles_standard_and_named_options() -> None:
199199
"""
200-
The `_Converter.convert_tool_choice` method should return NOT_GIVEN
200+
The `Converter.convert_tool_choice` method should return NOT_GIVEN
201201
if no choice is provided, pass through values like "auto", "required",
202202
or "none" unchanged, and translate any other string into a function
203203
selection dict.
204204
"""
205-
assert _Converter.convert_tool_choice(None).__class__.__name__ == "NotGiven"
206-
assert _Converter.convert_tool_choice("auto") == "auto"
207-
assert _Converter.convert_tool_choice("required") == "required"
208-
assert _Converter.convert_tool_choice("none") == "none"
209-
tool_choice_dict = _Converter.convert_tool_choice("mytool")
205+
assert Converter.convert_tool_choice(None).__class__.__name__ == "NotGiven"
206+
assert Converter.convert_tool_choice("auto") == "auto"
207+
assert Converter.convert_tool_choice("required") == "required"
208+
assert Converter.convert_tool_choice("none") == "none"
209+
tool_choice_dict = Converter.convert_tool_choice("mytool")
210210
assert isinstance(tool_choice_dict, dict)
211211
assert tool_choice_dict["type"] == "function"
212212
assert tool_choice_dict["function"]["name"] == "mytool"
213213

214214

215215
def test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas() -> None:
216216
"""
217-
The `_Converter.convert_response_format` method should return NOT_GIVEN
217+
The `Converter.convert_response_format` method should return NOT_GIVEN
218218
when no output schema is provided or if the output schema indicates
219219
plain text. For structured output schemas, it should return a dict
220220
with type `json_schema` and include the generated JSON schema and
221221
strict flag from the provided `AgentOutputSchema`.
222222
"""
223223
# when output is plain text (schema None or output_type str), do not include response_format
224-
assert _Converter.convert_response_format(None).__class__.__name__ == "NotGiven"
224+
assert Converter.convert_response_format(None).__class__.__name__ == "NotGiven"
225225
assert (
226-
_Converter.convert_response_format(AgentOutputSchema(str)).__class__.__name__ == "NotGiven"
226+
Converter.convert_response_format(AgentOutputSchema(str)).__class__.__name__ == "NotGiven"
227227
)
228228
# For e.g. integer output, we expect a response_format dict
229229
schema = AgentOutputSchema(int)
230-
resp_format = _Converter.convert_response_format(schema)
230+
resp_format = Converter.convert_response_format(schema)
231231
assert isinstance(resp_format, dict)
232232
assert resp_format["type"] == "json_schema"
233233
assert resp_format["json_schema"]["name"] == "final_output"
@@ -247,7 +247,7 @@ def test_items_to_messages_with_function_output_item():
247247
"call_id": "somecall",
248248
"output": '{"foo": "bar"}',
249249
}
250-
messages = _Converter.items_to_messages([func_output_item])
250+
messages = Converter.items_to_messages([func_output_item])
251251
assert len(messages) == 1
252252
tool_msg = messages[0]
253253
assert tool_msg["role"] == "tool"
@@ -266,16 +266,16 @@ def test_extract_all_and_text_content_for_strings_and_lists():
266266
should filter to only the textual parts.
267267
"""
268268
prompt = "just text"
269-
assert _Converter.extract_all_content(prompt) == prompt
270-
assert _Converter.extract_text_content(prompt) == prompt
269+
assert Converter.extract_all_content(prompt) == prompt
270+
assert Converter.extract_text_content(prompt) == prompt
271271
text1: ResponseInputTextParam = {"type": "input_text", "text": "one"}
272272
text2: ResponseInputTextParam = {"type": "input_text", "text": "two"}
273-
all_parts = _Converter.extract_all_content([text1, text2])
273+
all_parts = Converter.extract_all_content([text1, text2])
274274
assert isinstance(all_parts, list)
275275
assert len(all_parts) == 2
276276
assert all_parts[0]["type"] == "text" and all_parts[0]["text"] == "one"
277277
assert all_parts[1]["type"] == "text" and all_parts[1]["text"] == "two"
278-
text_parts = _Converter.extract_text_content([text1, text2])
278+
text_parts = Converter.extract_text_content([text1, text2])
279279
assert isinstance(text_parts, list)
280280
assert all(p["type"] == "text" for p in text_parts)
281281
assert [p["text"] for p in text_parts] == ["one", "two"]
@@ -288,20 +288,20 @@ def test_items_to_messages_handles_system_and_developer_roles():
288288
`message` typed dicts.
289289
"""
290290
sys_items: list[TResponseInputItem] = [{"role": "system", "content": "setup"}]
291-
sys_msgs = _Converter.items_to_messages(sys_items)
291+
sys_msgs = Converter.items_to_messages(sys_items)
292292
assert len(sys_msgs) == 1
293293
assert sys_msgs[0]["role"] == "system"
294294
assert sys_msgs[0]["content"] == "setup"
295295
dev_items: list[TResponseInputItem] = [{"role": "developer", "content": "debug"}]
296-
dev_msgs = _Converter.items_to_messages(dev_items)
296+
dev_msgs = Converter.items_to_messages(dev_items)
297297
assert len(dev_msgs) == 1
298298
assert dev_msgs[0]["role"] == "developer"
299299
assert dev_msgs[0]["content"] == "debug"
300300

301301

302302
def test_maybe_input_message_allows_message_typed_dict():
303303
"""
304-
The `_Converter.maybe_input_message` should recognize a dict with
304+
The `Converter.maybe_input_message` should recognize a dict with
305305
"type": "message" and a supported role as an input message. Ensure
306306
that such dicts are passed through by `items_to_messages`.
307307
"""
@@ -311,9 +311,9 @@ def test_maybe_input_message_allows_message_typed_dict():
311311
"role": "user",
312312
"content": "hi",
313313
}
314-
assert _Converter.maybe_input_message(message_dict) is not None
314+
assert Converter.maybe_input_message(message_dict) is not None
315315
# items_to_messages should process this correctly
316-
msgs = _Converter.items_to_messages([message_dict])
316+
msgs = Converter.items_to_messages([message_dict])
317317
assert len(msgs) == 1
318318
assert msgs[0]["role"] == "user"
319319
assert msgs[0]["content"] == "hi"
@@ -331,7 +331,7 @@ def test_tool_call_conversion():
331331
type="function_call",
332332
)
333333

334-
messages = _Converter.items_to_messages([function_call])
334+
messages = Converter.items_to_messages([function_call])
335335
assert len(messages) == 1
336336
tool_msg = messages[0]
337337
assert tool_msg["role"] == "assistant"
@@ -348,7 +348,7 @@ def test_tool_call_conversion():
348348
@pytest.mark.parametrize("role", ["user", "system", "developer"])
349349
def test_input_message_with_all_roles(role: str):
350350
"""
351-
The `_Converter.maybe_input_message` should recognize a dict with
351+
The `Converter.maybe_input_message` should recognize a dict with
352352
"type": "message" and a supported role as an input message. Ensure
353353
that such dicts are passed through by `items_to_messages`.
354354
"""
@@ -359,9 +359,9 @@ def test_input_message_with_all_roles(role: str):
359359
"role": casted_role,
360360
"content": "hi",
361361
}
362-
assert _Converter.maybe_input_message(message_dict) is not None
362+
assert Converter.maybe_input_message(message_dict) is not None
363363
# items_to_messages should process this correctly
364-
msgs = _Converter.items_to_messages([message_dict])
364+
msgs = Converter.items_to_messages([message_dict])
365365
assert len(msgs) == 1
366366
assert msgs[0]["role"] == casted_role
367367
assert msgs[0]["content"] == "hi"
@@ -372,7 +372,7 @@ def test_item_reference_errors():
372372
Test that item references are converted correctly.
373373
"""
374374
with pytest.raises(UserError):
375-
_Converter.items_to_messages(
375+
Converter.items_to_messages(
376376
[
377377
{
378378
"type": "item_reference",
@@ -392,14 +392,14 @@ def test_unknown_object_errors():
392392
"""
393393
with pytest.raises(UserError, match="Unhandled item type or structure"):
394394
# Purposely ignore the type error
395-
_Converter.items_to_messages([TestObject()]) # type: ignore
395+
Converter.items_to_messages([TestObject()]) # type: ignore
396396

397397

398398
def test_assistant_messages_in_history():
399399
"""
400400
Test that assistant messages are added to the history.
401401
"""
402-
messages = _Converter.items_to_messages(
402+
messages = Converter.items_to_messages(
403403
[
404404
{
405405
"role": "user",

‎tests/test_tool_converter.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
from agents import Agent, Handoff, function_tool, handoff
55
from agents.exceptions import UserError
6-
from agents.models.openai_chatcompletions import ToolConverter
6+
from agents.models.chatcmpl_converter import Converter
77
from agents.tool import FileSearchTool, WebSearchTool
88

99

@@ -15,7 +15,7 @@ def test_to_openai_with_function_tool():
1515
some_function(a="foo", b=[1, 2, 3])
1616

1717
tool = function_tool(some_function)
18-
result = ToolConverter.to_openai(tool)
18+
result = Converter.tool_to_openai(tool)
1919

2020
assert result["type"] == "function"
2121
assert result["function"]["name"] == "some_function"
@@ -34,7 +34,7 @@ class Foo(BaseModel):
3434
def test_convert_handoff_tool():
3535
agent = Agent(name="test_1", handoff_description="test_2")
3636
handoff_obj = handoff(agent=agent)
37-
result = ToolConverter.convert_handoff_tool(handoff_obj)
37+
result = Converter.convert_handoff_tool(handoff_obj)
3838

3939
assert result["type"] == "function"
4040
assert result["function"]["name"] == Handoff.default_tool_name(agent)
@@ -48,7 +48,7 @@ def test_convert_handoff_tool():
4848

4949
def test_tool_converter_hosted_tools_errors():
5050
with pytest.raises(UserError):
51-
ToolConverter.to_openai(WebSearchTool())
51+
Converter.tool_to_openai(WebSearchTool())
5252

5353
with pytest.raises(UserError):
54-
ToolConverter.to_openai(FileSearchTool(vector_store_ids=["abc"], max_num_results=1))
54+
Converter.tool_to_openai(FileSearchTool(vector_store_ids=["abc"], max_num_results=1))

0 commit comments

Comments
 (0)
Please sign in to comment.