
Commit 34abd58

bump
1 parent e2ebbde commit 34abd58

File tree

3 files changed: +74, -86 lines changed

libs/partners/ollama/langchain_ollama/chat_models.py

Lines changed: 14 additions & 14 deletions
@@ -839,19 +839,19 @@ def _iterate_over_stream(
                 if "message" in stream_resp and "content" in stream_resp["message"]
                 else ""
             )
-
+
             # Skip responses with done_reason: 'load' and empty content
             # These indicate the model was loaded but no actual generation occurred
             is_load_response_with_empty_content = (
-                stream_resp.get("done") is True and
-                stream_resp.get("done_reason") == "load" and
-                not content.strip()
+                stream_resp.get("done") is True
+                and stream_resp.get("done_reason") == "load"
+                and not content.strip()
             )
-
+
             if is_load_response_with_empty_content:
-                # Skip this chunk - don't yield anything for load responses with empty content
+                # Skip chunk - don't yield for load responses with empty content
                 continue
-
+
             if stream_resp.get("done") is True:
                 generation_info = dict(stream_resp)
                 if "model" in generation_info:

@@ -911,19 +911,19 @@ async def _aiterate_over_stream(
                 if "message" in stream_resp and "content" in stream_resp["message"]
                 else ""
             )
-
+
             # Skip responses with done_reason: 'load' and empty content
             # These indicate the model was loaded but no actual generation occurred
             is_load_response_with_empty_content = (
-                stream_resp.get("done") is True and
-                stream_resp.get("done_reason") == "load" and
-                not content.strip()
+                stream_resp.get("done") is True
+                and stream_resp.get("done_reason") == "load"
+                and not content.strip()
             )
-
+
             if is_load_response_with_empty_content:
-                # Skip this chunk - don't yield anything for load responses with empty content
+                # Skip chunk - don't yield for load responses with empty content
                 continue
-
+
             if stream_resp.get("done") is True:
                 generation_info = dict(stream_resp)
                 if "model" in generation_info:

libs/partners/ollama/tests/unit_tests/test_chat_models.py

Lines changed: 58 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from collections.abc import Generator
55
from contextlib import contextmanager
66
from typing import Any
7-
from unittest.mock import patch
7+
from unittest.mock import MagicMock, patch
88

99
import pytest
1010
from httpx import Client, Request, Response
@@ -146,113 +146,101 @@ def test_load_response_with_empty_content_is_skipped() -> None:
146146
"""Test that load responses with empty content are skipped."""
147147
load_only_response = [
148148
{
149-
'model': 'test-model',
150-
'created_at': '2025-01-01T00:00:00.000000000Z',
151-
'done': True,
152-
'done_reason': 'load',
153-
'message': {
154-
'role': 'assistant',
155-
'content': ''
156-
}
149+
"model": "test-model",
150+
"created_at": "2025-01-01T00:00:00.000000000Z",
151+
"done": True,
152+
"done_reason": "load",
153+
"message": {"role": "assistant", "content": ""},
157154
}
158155
]
159-
160-
with patch('langchain_ollama.chat_models.Client') as mock_client_class:
156+
157+
with patch("langchain_ollama.chat_models.Client") as mock_client_class:
161158
mock_client = MagicMock()
162159
mock_client_class.return_value = mock_client
163160
mock_client.chat.return_value = load_only_response
164-
165-
llm = ChatOllama(model='test-model')
166-
161+
162+
llm = ChatOllama(model="test-model")
163+
167164
with pytest.raises(ValueError, match="No data received from Ollama stream"):
168-
llm.invoke([HumanMessage('Hello')])
165+
llm.invoke([HumanMessage("Hello")])
169166

170167

171168
def test_load_response_with_whitespace_content_is_skipped() -> None:
172169
"""Test that load responses with only whitespace content are skipped."""
173170
load_whitespace_response = [
174171
{
175-
'model': 'test-model',
176-
'created_at': '2025-01-01T00:00:00.000000000Z',
177-
'done': True,
178-
'done_reason': 'load',
179-
'message': {
180-
'role': 'assistant',
181-
'content': ' \n \t '
182-
}
172+
"model": "test-model",
173+
"created_at": "2025-01-01T00:00:00.000000000Z",
174+
"done": True,
175+
"done_reason": "load",
176+
"message": {"role": "assistant", "content": " \n \t "},
183177
}
184178
]
185-
186-
with patch('langchain_ollama.chat_models.Client') as mock_client_class:
179+
180+
with patch("langchain_ollama.chat_models.Client") as mock_client_class:
187181
mock_client = MagicMock()
188182
mock_client_class.return_value = mock_client
189183
mock_client.chat.return_value = load_whitespace_response
190-
191-
llm = ChatOllama(model='test-model')
192-
184+
185+
llm = ChatOllama(model="test-model")
186+
193187
with pytest.raises(ValueError, match="No data received from Ollama stream"):
194-
llm.invoke([HumanMessage('Hello')])
188+
llm.invoke([HumanMessage("Hello")])
195189

196190

197191
def test_load_followed_by_content_response() -> None:
198192
"""Test that load responses are skipped when followed by actual content."""
199193
load_then_content_response = [
200194
{
201-
'model': 'test-model',
202-
'created_at': '2025-01-01T00:00:00.000000000Z',
203-
'done': True,
204-
'done_reason': 'load',
205-
'message': {
206-
'role': 'assistant',
207-
'content': ''
208-
}
195+
"model": "test-model",
196+
"created_at": "2025-01-01T00:00:00.000000000Z",
197+
"done": True,
198+
"done_reason": "load",
199+
"message": {"role": "assistant", "content": ""},
209200
},
210201
{
211-
'model': 'test-model',
212-
'created_at': '2025-01-01T00:00:01.000000000Z',
213-
'done': True,
214-
'done_reason': 'stop',
215-
'message': {
216-
'role': 'assistant',
217-
'content': 'Hello! How can I help you today?'
218-
}
219-
}
202+
"model": "test-model",
203+
"created_at": "2025-01-01T00:00:01.000000000Z",
204+
"done": True,
205+
"done_reason": "stop",
206+
"message": {
207+
"role": "assistant",
208+
"content": "Hello! How can I help you today?",
209+
},
210+
},
220211
]
221-
222-
with patch('langchain_ollama.chat_models.Client') as mock_client_class:
212+
213+
with patch("langchain_ollama.chat_models.Client") as mock_client_class:
223214
mock_client = MagicMock()
224215
mock_client_class.return_value = mock_client
225216
mock_client.chat.return_value = load_then_content_response
226-
227-
llm = ChatOllama(model='test-model')
228-
result = llm.invoke([HumanMessage('Hello')])
229-
230-
assert result.content == 'Hello! How can I help you today?'
231-
assert result.response_metadata.get('done_reason') == 'stop'
217+
218+
llm = ChatOllama(model="test-model")
219+
result = llm.invoke([HumanMessage("Hello")])
220+
221+
assert result.content == "Hello! How can I help you today?"
222+
assert result.response_metadata.get("done_reason") == "stop"
232223

233224

234225
def test_load_response_with_actual_content_is_not_skipped() -> None:
235226
"""Test that load responses with actual content are NOT skipped."""
236227
load_with_content_response = [
237228
{
238-
'model': 'test-model',
239-
'created_at': '2025-01-01T00:00:00.000000000Z',
240-
'done': True,
241-
'done_reason': 'load',
242-
'message': {
243-
'role': 'assistant',
244-
'content': 'This is actual content'
245-
}
229+
"model": "test-model",
230+
"created_at": "2025-01-01T00:00:00.000000000Z",
231+
"done": True,
232+
"done_reason": "load",
233+
"message": {"role": "assistant", "content": "This is actual content"},
246234
}
247235
]
248-
249-
with patch('langchain_ollama.chat_models.Client') as mock_client_class:
236+
237+
with patch("langchain_ollama.chat_models.Client") as mock_client_class:
250238
mock_client = MagicMock()
251239
mock_client_class.return_value = mock_client
252240
mock_client.chat.return_value = load_with_content_response
253-
254-
llm = ChatOllama(model='test-model')
255-
result = llm.invoke([HumanMessage('Hello')])
256-
257-
assert result.content == 'This is actual content'
258-
assert result.response_metadata.get('done_reason') == 'load'
241+
242+
llm = ChatOllama(model="test-model")
243+
result = llm.invoke([HumanMessage("Hello")])
244+
245+
assert result.content == "This is actual content"
246+
assert result.response_metadata.get("done_reason") == "load"
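
All four tests share the same arrange/act steps: patch the ollama Client that ChatOllama constructs, feed it a canned list of response dicts, and invoke the model. A hypothetical helper capturing that repetition (illustrative only, not part of this commit) could look like:

from typing import Any
from unittest.mock import MagicMock, patch

from langchain_core.messages import BaseMessage, HumanMessage

from langchain_ollama import ChatOllama


def invoke_with_canned_stream(chunks: list[dict[str, Any]]) -> BaseMessage:
    """Invoke ChatOllama against a canned Ollama response stream.

    Hypothetical test helper; mirrors the patching pattern used in the
    tests above.
    """
    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
        mock_client = MagicMock()
        mock_client_class.return_value = mock_client
        # ChatOllama consumes client.chat(...) output, so the canned list
        # stands in for a streamed response (as the tests above rely on).
        mock_client.chat.return_value = chunks

        llm = ChatOllama(model="test-model")
        return llm.invoke([HumanMessage("Hello")])

With such a helper, the load-only scenarios would wrap the call in pytest.raises, while the content scenarios assert on the returned message and its response_metadata, exactly as the tests above do inline.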

libs/partners/ollama/uv.lock

Lines changed: 2 additions & 2 deletions
Some generated files (such as uv.lock) are not rendered by default.
