Skip to content

Commit b55a6fe

Browse files
fix: files messages stack
1 parent fe16c22 commit b55a6fe

3 files changed

Lines changed: 95 additions & 47 deletions

File tree

src/uipath_langchain/agent/react/llm_with_files.py

Lines changed: 34 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from typing import Any
55

66
from langchain_core.language_models import BaseChatModel
7-
from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
7+
from langchain_core.messages import HumanMessage
88

99
from .file_type_handler import build_message_content_part_from_data
1010

@@ -44,33 +44,43 @@ async def create_part_for_file(
4444
)
4545

4646

47-
async def llm_call_with_files(
48-
messages: list[AnyMessage],
47+
async def add_files_to_message(
48+
message: HumanMessage,
4949
files: list[FileInfo],
5050
model: BaseChatModel,
51-
) -> AIMessage:
52-
"""Invoke an LLM with file attachments.
51+
) -> HumanMessage:
52+
"""Add file attachments to a HumanMessage.
5353
54-
Downloads files, creates provider-specific content parts, and appends them
55-
as a HumanMessage. If no files are provided, equivalent to model.ainvoke().
54+
Downloads files, creates provider-specific content parts, and merges them
55+
with the existing message content.
56+
57+
Args:
58+
message: The HumanMessage to add files to
59+
files: List of files to add
60+
model: The LLM model (used to determine provider-specific format)
61+
62+
Returns:
63+
A new HumanMessage with file content parts merged with original content
5664
"""
5765
if not files:
58-
response = await model.ainvoke(messages)
59-
if not isinstance(response, AIMessage):
60-
raise TypeError(
61-
f"LLM returned {type(response).__name__} instead of AIMessage"
62-
)
63-
return response
64-
65-
content_parts: list[str | dict[Any, Any]] = []
66+
return message
67+
68+
file_content_parts: list[dict[Any, Any]] = []
6669
for file_info in files:
6770
content_part = await create_part_for_file(file_info, model)
68-
content_parts.append(content_part)
69-
70-
file_message = HumanMessage(content=content_parts)
71-
all_messages = list(messages) + [file_message]
72-
73-
response = await model.ainvoke(all_messages)
74-
if not isinstance(response, AIMessage):
75-
raise TypeError(f"LLM returned {type(response).__name__} instead of AIMessage")
76-
return response
71+
file_content_parts.append(content_part)
72+
73+
existing_content = message.content
74+
if isinstance(existing_content, str):
75+
# convert string content to text content part
76+
merged_content: list[str | dict[Any, Any]] = [
77+
{"type": "text", "text": existing_content}
78+
]
79+
elif isinstance(existing_content, list):
80+
merged_content = list(existing_content)
81+
else:
82+
merged_content = []
83+
84+
merged_content.extend(file_content_parts)
85+
86+
return HumanMessage(content=merged_content)

src/uipath_langchain/agent/tools/internal_tools/analyze_files_tool.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from uipath.platform import UiPath
1212

1313
from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model
14-
from uipath_langchain.agent.react.llm_with_files import FileInfo, llm_call_with_files
14+
from uipath_langchain.agent.react.llm_with_files import FileInfo, add_files_to_message
1515
from uipath_langchain.agent.tools.structured_tool_with_output_type import (
1616
StructuredToolWithOutputType,
1717
)
@@ -62,11 +62,14 @@ async def tool_fn(**kwargs: Any):
6262
if not files:
6363
return {"analysisResult": "No attachments provided to analyze."}
6464

65+
human_message = HumanMessage(content=analysisTask)
66+
human_message_with_files = await add_files_to_message(human_message, files, llm)
67+
6568
messages: list[AnyMessage] = [
6669
SystemMessage(content=ANALYZE_FILES_SYSTEM_MESSAGE),
67-
HumanMessage(content=analysisTask),
70+
human_message_with_files,
6871
]
69-
result = await llm_call_with_files(messages, files, llm)
72+
result = await llm.ainvoke(messages)
7073
return result
7174

7275
wrapper = get_job_attachment_wrapper(output_type=output_model)

tests/agent/tools/internal_tools/test_analyze_files_tool.py

Lines changed: 55 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from unittest.mock import AsyncMock, Mock, patch
55

66
import pytest
7-
from langchain_core.messages import AIMessage
7+
from langchain_core.messages import AIMessage, HumanMessage
88
from pydantic import BaseModel, ConfigDict, Field
99
from uipath.agent.models.agent import (
1010
AgentInternalToolProperties,
@@ -76,15 +76,15 @@ def resource_config(self):
7676
"uipath_langchain.agent.wrappers.job_attachment_wrapper.get_job_attachment_wrapper"
7777
)
7878
@patch(
79-
"uipath_langchain.agent.tools.internal_tools.analyze_files_tool.llm_call_with_files"
79+
"uipath_langchain.agent.tools.internal_tools.analyze_files_tool.add_files_to_message"
8080
)
8181
@patch(
8282
"uipath_langchain.agent.tools.internal_tools.analyze_files_tool._resolve_job_attachment_arguments"
8383
)
8484
async def test_create_analyze_file_tool_success(
8585
self,
8686
mock_resolve_attachments,
87-
mock_llm_call,
87+
mock_add_files,
8888
mock_get_wrapper,
8989
resource_config,
9090
mock_llm,
@@ -98,7 +98,16 @@ async def test_create_analyze_file_tool_success(
9898
mime_type="application/pdf",
9999
)
100100
]
101-
mock_llm_call.return_value = "Analysis complete"
101+
102+
# mock add_files_to_message to return a message with files added
103+
mock_message_with_files = HumanMessage(
104+
content=[
105+
{"type": "text", "text": "Summarize the document"},
106+
{"type": "file", "url": "https://example.com/file.pdf"},
107+
]
108+
)
109+
mock_add_files.return_value = mock_message_with_files
110+
102111
mock_wrapper = Mock()
103112
mock_get_wrapper.return_value = mock_wrapper
104113

@@ -121,18 +130,29 @@ async def test_create_analyze_file_tool_success(
121130
)
122131

123132
# Verify calls
124-
assert result == "Analysis complete"
133+
assert result == "Analyzed result"
125134
mock_resolve_attachments.assert_called_once()
126-
mock_llm_call.assert_called_once()
127-
128-
# Verify LLM call arguments
129-
call_args = mock_llm_call.call_args
130-
messages, files, llm = call_args[0]
131-
assert len(messages) == 2
132-
assert messages[0].content == ANALYZE_FILES_SYSTEM_MESSAGE
133-
assert messages[1].content == "Summarize the document"
134-
assert len(files) == 1
135-
assert files[0].url == "https://example.com/file.pdf"
135+
mock_add_files.assert_called_once()
136+
mock_llm.ainvoke.assert_called_once()
137+
138+
# Verify add_files_to_message was called correctly
139+
add_files_call_args = mock_add_files.call_args
140+
message_arg = add_files_call_args[0][0]
141+
files_arg = add_files_call_args[0][1]
142+
llm_arg = add_files_call_args[0][2]
143+
144+
assert isinstance(message_arg, HumanMessage)
145+
assert message_arg.content == "Summarize the document"
146+
assert len(files_arg) == 1
147+
assert files_arg[0].url == "https://example.com/file.pdf"
148+
assert llm_arg == mock_llm
149+
150+
# Verify llm.ainvoke was called with correct messages
151+
ainvoke_call_args = mock_llm.ainvoke.call_args
152+
messages_arg = ainvoke_call_args[0][0]
153+
assert len(messages_arg) == 2
154+
assert messages_arg[0].content == ANALYZE_FILES_SYSTEM_MESSAGE
155+
assert messages_arg[1] == mock_message_with_files
136156

137157
@patch(
138158
"uipath_langchain.agent.wrappers.job_attachment_wrapper.get_job_attachment_wrapper"
@@ -176,15 +196,15 @@ async def test_create_analyze_file_tool_missing_attachments(
176196
"uipath_langchain.agent.wrappers.job_attachment_wrapper.get_job_attachment_wrapper"
177197
)
178198
@patch(
179-
"uipath_langchain.agent.tools.internal_tools.analyze_files_tool.llm_call_with_files"
199+
"uipath_langchain.agent.tools.internal_tools.analyze_files_tool.add_files_to_message"
180200
)
181201
@patch(
182202
"uipath_langchain.agent.tools.internal_tools.analyze_files_tool._resolve_job_attachment_arguments"
183203
)
184204
async def test_create_analyze_file_tool_with_multiple_attachments(
185205
self,
186206
mock_resolve_attachments,
187-
mock_llm_call,
207+
mock_add_files,
188208
mock_get_wrapper,
189209
resource_config,
190210
mock_llm,
@@ -202,10 +222,25 @@ async def test_create_analyze_file_tool_with_multiple_attachments(
202222
mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
203223
),
204224
]
205-
mock_llm_call.return_value = "Multiple files analyzed"
225+
226+
# mock add_files_to_message to return a message with multiple files
227+
mock_message_with_files = HumanMessage(
228+
content=[
229+
{"type": "text", "text": "Compare these documents"},
230+
{"type": "file", "url": "https://example.com/file1.pdf"},
231+
{"type": "file", "url": "https://example.com/file2.docx"},
232+
]
233+
)
234+
mock_add_files.return_value = mock_message_with_files
235+
206236
mock_wrapper = Mock()
207237
mock_get_wrapper.return_value = mock_wrapper
208238

239+
# setup llm to return analyzed result
240+
mock_llm.ainvoke = AsyncMock(
241+
return_value=AIMessage(content="Multiple files analyzed")
242+
)
243+
209244
tool = create_analyze_file_tool(resource_config, mock_llm)
210245

211246
mock_attachments = [
@@ -227,8 +262,8 @@ async def test_create_analyze_file_tool_with_multiple_attachments(
227262
assert result == "Multiple files analyzed"
228263
mock_resolve_attachments.assert_called_once()
229264

230-
# Verify LLM received both files
231-
call_args = mock_llm_call.call_args
265+
# Verify add_files_to_message received both files
266+
call_args = mock_add_files.call_args
232267
files = call_args[0][1]
233268
assert len(files) == 2
234269

0 commit comments

Comments (0)