
Commit 94e2ab7

fix(llm): accept empty choices as valid response and handle that case gracefully
1 parent 4ea7f8e commit 94e2ab7

2 files changed

Lines changed: 26 additions & 21 deletions
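Taken together, the two changes mean that LLM.ask_tool may now return None when the provider sends back an empty choices list, and ToolCallAgent.think falls back to empty tool calls and empty content instead of dereferencing a missing response. A minimal caller-side sketch of that contract follows; the helper name get_thoughts and the llm, messages, and tools parameters are illustrative and not part of this commit.

from typing import Any, List, Tuple

async def get_thoughts(llm: Any, messages: List[Any], tools: List[dict]) -> Tuple[str, List[Any]]:
    # Sketch of the new contract: ask_tool may return None on an empty
    # `choices` array instead of raising ValueError.
    response = await llm.ask_tool(messages=messages, tools=tools)
    if response is None:
        # Empty choices from the provider: treat as "no content, no tool calls"
        # rather than a hard failure.
        return "", []
    return (response.content or ""), list(response.tool_calls or [])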


app/agent/toolcall.py

Lines changed: 19 additions & 18 deletions
@@ -10,7 +10,6 @@
 from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice
 from app.tool import CreateChatCompletion, Terminate, ToolCollection
 
-
 TOOL_CALL_REQUIRED = "Tool calls required but none provided"
 
 
@@ -71,40 +70,42 @@ async def think(self) -> bool:
                 return False
             raise
 
-        self.tool_calls = response.tool_calls
+        self.tool_calls = tool_calls = (
+            response.tool_calls if response and response.tool_calls else []
+        )
+        content = response.content if response and response.content else ""
 
         # Log response info
-        logger.info(f"✨ {self.name}'s thoughts: {response.content}")
+        logger.info(f"✨ {self.name}'s thoughts: {content}")
         logger.info(
-            f"🛠️ {self.name} selected {len(response.tool_calls) if response.tool_calls else 0} tools to use"
+            f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use"
        )
-        if response.tool_calls:
-            logger.info(
-                f"🧰 Tools being prepared: {[call.function.name for call in response.tool_calls]}"
-            )
+        if tool_calls:
             logger.info(
-                f"🔧 Tool arguments: {response.tool_calls[0].function.arguments}"
+                f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}"
             )
+            logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}")
 
         try:
+            if response is None:
+                raise RuntimeError("No response received from the LLM")
+
             # Handle different tool_choices modes
             if self.tool_choices == ToolChoice.NONE:
-                if response.tool_calls:
+                if tool_calls:
                     logger.warning(
                         f"🤔 Hmm, {self.name} tried to use tools when they weren't available!"
                     )
-                if response.content:
-                    self.memory.add_message(Message.assistant_message(response.content))
+                if content:
+                    self.memory.add_message(Message.assistant_message(content))
                     return True
                 return False
 
             # Create and add assistant message
             assistant_msg = (
-                Message.from_tool_calls(
-                    content=response.content, tool_calls=self.tool_calls
-                )
+                Message.from_tool_calls(content=content, tool_calls=self.tool_calls)
                 if self.tool_calls
-                else Message.assistant_message(response.content)
+                else Message.assistant_message(content)
             )
             self.memory.add_message(assistant_msg)
 
@@ -113,7 +114,7 @@ async def think(self) -> bool:
 
             # For 'auto' mode, continue with content if no commands but content exists
             if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:
-                return bool(response.content)
+                return bool(content)
 
             return bool(self.tool_calls)
         except Exception as e:
@@ -209,7 +210,7 @@ async def execute_tool(self, command: ToolCall) -> str:
             return f"Error: {error_msg}"
         except Exception as e:
             error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}"
-            logger.error(error_msg)
+            logger.exception(error_msg)
             return f"Error: {error_msg}"
 
     async def _handle_special_tool(self, name: str, result: Any, **kwargs):
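A smaller change in execute_tool swaps logger.error for logger.exception, which records the active traceback when called from inside an except block. A tiny standalone illustration of the difference, using the standard library logging module rather than the project's app.logger, is below.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def run_flaky_tool() -> None:
    try:
        raise RuntimeError("tool blew up")
    except Exception as e:
        # logger.error(...) would record only this message;
        # logger.exception(...) logs at ERROR level and appends the traceback.
        logger.exception(f"⚠️ Tool encountered a problem: {e}")

run_flaky_tool()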

app/llm.py

Lines changed: 7 additions & 3 deletions
@@ -10,6 +10,7 @@
     OpenAIError,
     RateLimitError,
 )
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from tenacity import (
     retry,
     retry_if_exception_type,
@@ -653,7 +654,7 @@ async def ask_tool(
         tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO,  # type: ignore
         temperature: Optional[float] = None,
         **kwargs,
-    ):
+    ) -> ChatCompletionMessage | None:
         """
         Ask LLM using functions/tools and return the response.
 
@@ -731,12 +732,15 @@ async def ask_tool(
                     temperature if temperature is not None else self.temperature
                 )
 
-            response = await self.client.chat.completions.create(**params)
+            response: ChatCompletion = await self.client.chat.completions.create(
+                **params, stream=False
+            )
 
             # Check if response is valid
             if not response.choices or not response.choices[0].message:
                 print(response)
-                raise ValueError("Invalid or empty response from LLM")
+                # raise ValueError("Invalid or empty response from LLM")
+                return None
 
             # Update token counts
             self.update_token_count(
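The new guard can be exercised with a stubbed completion object: an empty choices list (or a first choice without a message) now yields None rather than a ValueError. The Fake* classes and extract_message below are illustrative stand-ins, not the OpenAI SDK types or the project's code.

from dataclasses import dataclass, field
from typing import Any, List, Optional

@dataclass
class FakeMessage:
    content: Optional[str] = None
    tool_calls: Optional[List[Any]] = None

@dataclass
class FakeChoice:
    message: Optional[FakeMessage] = None

@dataclass
class FakeCompletion:
    choices: List[FakeChoice] = field(default_factory=list)

def extract_message(response: FakeCompletion) -> Optional[FakeMessage]:
    # Mirrors the new check: empty choices or a missing message yields None.
    if not response.choices or not response.choices[0].message:
        return None
    return response.choices[0].message

assert extract_message(FakeCompletion()) is None
assert extract_message(FakeCompletion([FakeChoice(FakeMessage("hi"))])).content == "hi"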

0 commit comments
