Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 68 additions & 1 deletion packages/derisk-core/src/derisk/agent/core/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -666,6 +666,7 @@ def function_callning_reply_messages(
from derisk.core import ModelMessageRoleType

## 历史消息
has_tool_calls = llm_out and llm_out.tool_calls
if llm_out:
llm_content = llm_out.content or ""
if llm_out.thinking_content:
Expand All @@ -681,7 +682,11 @@ def function_callning_reply_messages(
}
)

if action_outs:
# 只有当 assistant 消息中含有 tool_calls 时,才追加 tool 结果消息。
# 若 tool_calls 为 None/空,说明 LLM 未发起工具调用,此时追加 tool 消息会
# 导致 API 报错:messages with role "tool" must be a response to a
# preceeding message with "tool_calls"
if action_outs and has_tool_calls:
## 准备当前轮次的ToolMessage
for action_out in action_outs:
function_call_reply_messages.append(
Expand All @@ -691,6 +696,12 @@ def function_callning_reply_messages(
"content": action_out.content,
}
)
elif action_outs and not has_tool_calls:
logger.warning(
f"[function_callning_reply_messages] Skipping {len(action_outs)} tool result(s) "
f"because the preceding assistant message has no tool_calls. "
f"This prevents invalid message sequences being sent to the LLM."
)

return function_call_reply_messages

Expand Down Expand Up @@ -1403,6 +1414,11 @@ async def thinking(
if tool_messages:
llm_messages.extend(tool_messages)

# 过滤非法消息序列:移除没有匹配 tool_calls 的孤立 tool 角色消息
# 否则 API 会报错:messages with role "tool" must be a response
# to a preceeding message with "tool_calls"
llm_messages = _sanitize_tool_messages(llm_messages)

if not self.llm_client:
raise ValueError("LLM client is not initialized!")

Expand Down Expand Up @@ -2719,5 +2735,56 @@ def _new_system_message(content):
return [{"content": content, "role": ModelMessageRoleType.SYSTEM}]


def _sanitize_tool_messages(messages: List[dict]) -> List[dict]:
    """Remove orphaned 'tool' role messages that have no matching preceding
    assistant message with 'tool_calls'.

    OpenAI-compatible APIs require that every message with role='tool'
    follows an assistant message that contains a non-empty 'tool_calls'
    list. Sending orphaned tool messages causes a 400 error (error text
    quoted verbatim from the API, including its spelling):
    "messages with role 'tool' must be a response to a preceeding message
    with 'tool_calls'."

    Args:
        messages: Chat messages in OpenAI dict format (``role``/``content``
            plus optional ``tool_calls`` / ``tool_call_id`` keys).

    Returns:
        A new list with orphaned tool messages dropped. The input list is
        never mutated; an empty/None input is returned as-is.
    """
    if not messages:
        return messages

    sanitized: List[dict] = []
    orphan_count = 0
    # Whether the most recent assistant message kept so far carries a
    # non-empty tool_calls. Tracking this flag in a single forward pass
    # replaces the previous reversed() re-scan per tool message, which was
    # O(n^2) in the worst case while producing the same keep/drop decision.
    last_ai_has_tool_calls = False

    for msg in messages:
        role = msg.get("role", "")
        if role == ModelMessageRoleType.TOOL:
            if last_ai_has_tool_calls:
                sanitized.append(msg)
            else:
                orphan_count += 1
                logger.warning(
                    f"[_sanitize_tool_messages] Dropped orphaned tool message "
                    f"(tool_call_id={msg.get('tool_call_id')!r}) — "
                    f"no preceding assistant message with tool_calls."
                )
        else:
            if role == ModelMessageRoleType.AI:
                # Note: any later AI message resets the flag, even one
                # without tool_calls, matching the prior "last AI wins"
                # behavior.
                last_ai_has_tool_calls = bool(msg.get("tool_calls"))
            sanitized.append(msg)

    if orphan_count:
        logger.warning(
            f"[_sanitize_tool_messages] Removed {orphan_count} orphaned tool "
            f"message(s) from LLM input to prevent API 400 errors."
        )

    return sanitized


def _is_list_of_type(lst: List[Any], type_cls: type) -> bool:
return all(isinstance(item, type_cls) for item in lst)
29 changes: 12 additions & 17 deletions packages/derisk-core/src/derisk/core/interface/media.py
Original file line number Diff line number Diff line change
Expand Up @@ -296,21 +296,24 @@ def to_chat_ai_message(
cls,
role,
content: Union[str, "MediaContent", List["MediaContent"]],
tool_calls: Optional[str] = None,
tool_calls: Optional[List[Dict]] = None,
support_media_content: bool = True,
type_mapping: Optional[Dict[str, str]] = None,
replace_url_func: Optional[Callable[[str], str]] = None,
) -> ChatCompletionMessageParam:
"""Convert the media contents to chat completion message."""
# Build base message, only include tool_calls when present
def build_message(role_val, content_val, tool_calls_val=None):
msg: Dict[str, Any] = {"role": role_val, "content": content_val}
if tool_calls_val is not None:
msg["tool_calls"] = tool_calls_val
return cast(ChatCompletionMessageParam, msg)

if not content:
return cast(ChatCompletionMessageParam, {
"role": role,
"tool_calls": tool_calls,
"content": "",
})
return build_message(role, "", tool_calls)

if isinstance(content, str):
return cast(ChatCompletionMessageParam, {"role": role, "content": content, "tool_calls": tool_calls})
return build_message(role, content, tool_calls)
if isinstance(content, MediaContent):
content = [content]
new_content = [
Expand All @@ -324,16 +327,8 @@ def to_chat_ai_message(
if not text_content:
raise ValueError("No text content found in the media contents")
# Not support media content, just pass the string text as content
return cast(ChatCompletionMessageParam, {
"role": role,
"tool_calls": tool_calls,
"content": text_content[0],
})
return cast(ChatCompletionMessageParam, {
"role": role,
"tool_calls": tool_calls,
"content": new_content,
})
return build_message(role, text_content[0], tool_calls)
return build_message(role, new_content, tool_calls)

@classmethod
def to_chat_tool_message(
Expand Down