Closed
Labels: bug (Something isn't working)
Description
- This is actually a bug report.
What Model are you using?
- gpt-4o-mini
Describe the bug
Adding stream=True raises an exception: 'Stream' object has no attribute 'choices'
in OpenAISchema.from_response(cls, completion, validation_context, strict, mode)
195 return cls.parse_responses_tools(
196 completion,
197 validation_context,
198 strict,
199 )
--> 201 if not completion.choices:
202 # This helps catch errors from OpenRouter
203 if hasattr(completion, "error"):
Longer traceback below.
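For context, the failing attribute access matches what the raw OpenAI SDK returns when stream=True: a Stream iterator of ChatCompletionChunk objects rather than a single ChatCompletion, so the streamed object itself has no top-level choices. A minimal sketch of that underlying SDK behavior (assumes a valid OPENAI_API_KEY; this is not the instructor code path, just the shape of the object that ends up in from_response):

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)

# The returned object is a Stream wrapper, not a ChatCompletion,
# so it has no `.choices` attribute of its own.
print(type(stream).__name__)        # "Stream" -- the type named in the traceback
print(hasattr(stream, "choices"))   # False

# The individual chunks do carry `.choices`.
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")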
To Reproduce
import instructor
from openai import OpenAI
from pydantic import BaseModel


class Item(BaseModel):
    name: str
    price: float
    quantity: int


class Receipt(BaseModel):
    items: list[Item]
    total: float


client = instructor.from_openai(OpenAI())


def extract(url: str) -> Receipt:
    return client.chat.completions.create(
        model="gpt-4o-mini",
        response_model=Receipt,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": url},
                    },
                    {
                        "type": "text",
                        "text": "Analyze the image and return the items in the receipt and the total amount.",
                    },
                ],
            }
        ],
        stream=True,
    )


url = "https://templates.mediamodifier.com/645124ff36ed2f5227cbf871/supermarket-receipt-template.jpg"

for chunk in extract(url):
    print(chunk)

Expected behavior
https://python.useinstructor.com/examples/extracting_receipts/?h=image
Fully streamed:
"""
items=[Item(name='Lorem ipsum', price=9.2, quantity=1), Item(name='Lorem ipsum dolor sit', price=19.2, quantity=1), Item(name='Lorem ipsum dolor sit amet', price=15.0, quantity=1), Item(name='Lorem ipsum', price=15.0, quantity=1), Item(name='Lorem ipsum', price=15.0, quantity=1), Item(name='Lorem ipsum dolor sit', price=15.0, quantity=1), Item(name='Lorem ipsum', price=19.2, quantity=1)] total=107.6
"""
Screenshots
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/instructor/processing/function_calls.py:201, in OpenAISchema.from_response(cls, completion, validation_context, strict, mode)
195 return cls.parse_responses_tools(
196 completion,
197 validation_context,
198 strict,
199 )
--> 201 if not completion.choices:
202 # This helps catch errors from OpenRouter
203 if hasattr(completion, "error"):
AttributeError: 'Stream' object has no attribute 'choices'
The above exception was the direct cause of the following exception:
RetryError Traceback (most recent call last)
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/instructor/core/retry.py:188, in retry_sync(func, response_model, args, kwargs, context, max_retries, strict, mode, hooks)
187 response = None
--> 188 for attempt in max_retries:
189 with attempt:
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/tenacity/__init__.py:443, in BaseRetrying.__iter__(self)
442 while True:
--> 443 do = self.iter(retry_state=retry_state)
444 if isinstance(do, DoAttempt):
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/tenacity/__init__.py:376, in BaseRetrying.iter(self, retry_state)
375 for action in self.iter_state.actions:
--> 376 result = action(retry_state)
377 return result
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/tenacity/__init__.py:419, in BaseRetrying._post_stop_check_actions.<locals>.exc_check(rs)
418 raise retry_exc.reraise()
--> 419 raise retry_exc from fut.exception()
RetryError: RetryError[<Future at 0x31905ec00 state=finished raised AttributeError>]
The above exception was the direct cause of the following exception:
InstructorRetryException Traceback (most recent call last)
Cell In[3], line 46
21 return client.chat.completions.create(
22 model="gpt-4o-mini",
23 response_model=Receipt,
(...)
39 stream=True,
40 )
43 url = "https://templates.mediamodifier.com/645124ff36ed2f5227cbf871/supermarket-receipt-template.jpg"
---> 46 for chunk in extract(url):
47 print(chunk)
Cell In[3], line 21
20 def extract(url: str) -> Receipt:
---> 21 return client.chat.completions.create(
22 model="gpt-4o-mini",
23 response_model=Receipt,
24 messages=[
25 {
26 "role": "user",
27 "content": [
28 {
29 "type": "image_url",
30 "image_url": {"url": url},
31 },
32 {
33 "type": "text",
34 "text": "Analyze the image and return the items in the receipt and the total amount.",
35 },
36 ],
37 }
38 ],
39 stream=True,
40 )
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/instructor/core/client.py:376, in Instructor.create(self, response_model, messages, max_retries, validation_context, context, strict, hooks, **kwargs)
373 if hooks is not None:
374 combined_hooks = self.hooks + hooks
--> 376 return self.create_fn(
377 response_model=response_model,
378 messages=messages,
379 max_retries=max_retries,
380 validation_context=validation_context,
381 context=context,
382 strict=strict,
383 hooks=combined_hooks,
384 **kwargs,
385 )
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/instructor/core/patch.py:258, in patch.<locals>.new_create_sync(response_model, validation_context, context, max_retries, strict, hooks, *args, **kwargs)
255 if obj is not None:
256 return obj # type: ignore[return-value]
--> 258 response = retry_sync(
259 func=func, # type: ignore
260 response_model=response_model,
261 context=context,
262 max_retries=max_retries,
263 args=args,
264 hooks=hooks,
265 strict=strict,
266 kwargs=new_kwargs,
267 mode=mode,
268 )
270 # Save to cache
271 if cache is not None and response_model is not None:
File ~/miniconda3/envs/lumen/lib/python3.12/site-packages/instructor/core/retry.py:285, in retry_sync(func, response_model, args, kwargs, context, max_retries, strict, mode, hooks)
283 except RetryError as e:
284 logger.debug(f"Retry error: {e}")
--> 285 raise InstructorRetryException(
286 e.last_attempt._exception,
287 last_completion=response,
288 n_attempts=attempt.retry_state.attempt_number,
289 #! deprecate messages soon
290 messages=extract_messages(
291 kwargs
292 ), # Use the optimized function instead of nested lookups
293 create_kwargs=kwargs,
294 total_usage=total_usage,
295 failed_attempts=failed_attempts,
296 ) from e
InstructorRetryException: <failed_attempts>
<generation number="1">
<exception>
'Stream' object has no attribute 'choices'
</exception>
<completion>
<openai.Stream object at 0x31943e360>
</completion>
</generation>
</failed_attempts>