 from collections.abc import Generator
 from contextlib import contextmanager
 from typing import Any
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch

 import pytest
 from httpx import Client, Request, Response
@@ -146,113 +146,101 @@ def test_load_response_with_empty_content_is_skipped() -> None:
     """Test that load responses with empty content are skipped."""
     load_only_response = [
         {
-            'model': 'test-model',
-            'created_at': '2025-01-01T00:00:00.000000000Z',
-            'done': True,
-            'done_reason': 'load',
-            'message': {
-                'role': 'assistant',
-                'content': ''
-            }
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": ""},
         }
     ]
-
-    with patch('langchain_ollama.chat_models.Client') as mock_client_class:
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
         mock_client = MagicMock()
         mock_client_class.return_value = mock_client
         mock_client.chat.return_value = load_only_response
-
-        llm = ChatOllama(model='test-model')
-
+
+        llm = ChatOllama(model="test-model")
+
         with pytest.raises(ValueError, match="No data received from Ollama stream"):
-            llm.invoke([HumanMessage('Hello')])
+            llm.invoke([HumanMessage("Hello")])


 def test_load_response_with_whitespace_content_is_skipped() -> None:
     """Test that load responses with only whitespace content are skipped."""
     load_whitespace_response = [
         {
-            'model': 'test-model',
-            'created_at': '2025-01-01T00:00:00.000000000Z',
-            'done': True,
-            'done_reason': 'load',
-            'message': {
-                'role': 'assistant',
-                'content': ' \n \t '
-            }
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": " \n \t "},
         }
     ]
-
-    with patch('langchain_ollama.chat_models.Client') as mock_client_class:
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
         mock_client = MagicMock()
         mock_client_class.return_value = mock_client
         mock_client.chat.return_value = load_whitespace_response
-
-        llm = ChatOllama(model='test-model')
-
+
+        llm = ChatOllama(model="test-model")
+
        with pytest.raises(ValueError, match="No data received from Ollama stream"):
-            llm.invoke([HumanMessage('Hello')])
+            llm.invoke([HumanMessage("Hello")])


 def test_load_followed_by_content_response() -> None:
     """Test that load responses are skipped when followed by actual content."""
     load_then_content_response = [
         {
-            'model': 'test-model',
-            'created_at': '2025-01-01T00:00:00.000000000Z',
-            'done': True,
-            'done_reason': 'load',
-            'message': {
-                'role': 'assistant',
-                'content': ''
-            }
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": ""},
         },
         {
-            'model': 'test-model',
-            'created_at': '2025-01-01T00:00:01.000000000Z',
-            'done': True,
-            'done_reason': 'stop',
-            'message': {
-                'role': 'assistant',
-                'content': 'Hello! How can I help you today?'
-            }
-        }
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:01.000000000Z",
+            "done": True,
+            "done_reason": "stop",
+            "message": {
+                "role": "assistant",
+                "content": "Hello! How can I help you today?",
+            },
+        },
     ]
-
-    with patch('langchain_ollama.chat_models.Client') as mock_client_class:
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
         mock_client = MagicMock()
         mock_client_class.return_value = mock_client
         mock_client.chat.return_value = load_then_content_response
-
-        llm = ChatOllama(model='test-model')
-        result = llm.invoke([HumanMessage('Hello')])
-
-        assert result.content == 'Hello! How can I help you today?'
-        assert result.response_metadata.get('done_reason') == 'stop'
+
+        llm = ChatOllama(model="test-model")
+        result = llm.invoke([HumanMessage("Hello")])
+
+        assert result.content == "Hello! How can I help you today?"
+        assert result.response_metadata.get("done_reason") == "stop"


 def test_load_response_with_actual_content_is_not_skipped() -> None:
     """Test that load responses with actual content are NOT skipped."""
     load_with_content_response = [
         {
-            'model': 'test-model',
-            'created_at': '2025-01-01T00:00:00.000000000Z',
-            'done': True,
-            'done_reason': 'load',
-            'message': {
-                'role': 'assistant',
-                'content': 'This is actual content'
-            }
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": "This is actual content"},
        }
    ]
-
-    with patch('langchain_ollama.chat_models.Client') as mock_client_class:
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
         mock_client = MagicMock()
         mock_client_class.return_value = mock_client
         mock_client.chat.return_value = load_with_content_response
-
-        llm = ChatOllama(model='test-model')
-        result = llm.invoke([HumanMessage('Hello')])
-
-        assert result.content == 'This is actual content'
-        assert result.response_metadata.get('done_reason') == 'load'
+
+        llm = ChatOllama(model="test-model")
+        result = llm.invoke([HumanMessage("Hello")])
+
+        assert result.content == "This is actual content"
+        assert result.response_metadata.get("done_reason") == "load"
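For orientation, here is a minimal sketch of the chunk-filtering behavior these tests exercise: stream chunks whose `done_reason` is `"load"` and whose message content is empty or whitespace-only get dropped, and if nothing remains the consumer raises `ValueError("No data received from Ollama stream")`. The helper names `_drop_empty_load_chunks` and `_collect` are illustrative assumptions, not the actual `langchain_ollama.chat_models` implementation.

```python
from collections.abc import Iterator
from typing import Any


def _drop_empty_load_chunks(
    chunks: Iterator[dict[str, Any]],
) -> Iterator[dict[str, Any]]:
    """Illustrative only: skip 'load' chunks that carry no usable content,
    but keep 'load' chunks whose content is non-empty."""
    for chunk in chunks:
        content = chunk.get("message", {}).get("content", "")
        if chunk.get("done_reason") == "load" and not content.strip():
            # Model-load bookkeeping chunk with empty/whitespace content: skip it.
            continue
        yield chunk


def _collect(chunks: Iterator[dict[str, Any]]) -> list[dict[str, Any]]:
    """Illustrative consumer: an entirely filtered-out stream is an error."""
    kept = list(_drop_empty_load_chunks(chunks))
    if not kept:
        raise ValueError("No data received from Ollama stream")
    return kept
```

Under this sketch, the first two tests see every chunk dropped and hit the `ValueError`, the third keeps only the `"stop"` chunk with the assistant's reply, and the fourth keeps the `"load"` chunk because its content is non-empty.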