diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 08803d8c0..120d726db 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -7,7 +7,8 @@
 
 from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
-from openai.types.chat import ChatCompletion, ChatCompletionChunk
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
 from openai.types.responses import Response
 from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
@@ -74,8 +75,11 @@ async def get_response(
                 prompt=prompt,
             )
 
-            first_choice = response.choices[0]
-            message = first_choice.message
+            message: ChatCompletionMessage | None = None
+            first_choice: Choice | None = None
+            if response.choices and len(response.choices) > 0:
+                first_choice = response.choices[0]
+                message = first_choice.message
 
             if _debug.DONT_LOG_MODEL_DATA:
                 logger.debug("Received model response")
@@ -86,10 +90,8 @@
                     json.dumps(message.model_dump(), indent=2),
                 )
             else:
-                logger.debug(
-                    "LLM resp had no message. finish_reason: %s",
-                    first_choice.finish_reason,
-                )
+                finish_reason = first_choice.finish_reason if first_choice else "-"
+                logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")
 
             usage = (
                 Usage(