Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions instructor/process_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -1110,6 +1110,30 @@ def handle_response_model(
if system_message:
new_kwargs["system"] = system_message

elif mode in {Mode.GENAI_TOOLS, Mode.GENAI_STRUCTURED_OUTPUTS}:
# Handle GenAI mode - convert messages to contents and extract system message
from instructor.utils import convert_to_genai_messages, extract_genai_system_message

# Convert OpenAI-style messages to GenAI-style contents
new_kwargs["contents"] = convert_to_genai_messages(messages)

# Extract multimodal content for GenAI
new_kwargs["contents"] = extract_genai_multimodal_content(
new_kwargs["contents"], autodetect_images
)

# Handle system message for GenAI
if "system" not in new_kwargs:
system_message = extract_genai_system_message(messages)
if system_message:
from google.genai import types
new_kwargs["config"] = types.GenerateContentConfig(
system_instruction=system_message
)

# Remove messages since we converted to contents
new_kwargs.pop("messages", None)

else:
if mode in {
Mode.RESPONSES_TOOLS,
Expand Down
57 changes: 57 additions & 0 deletions test_fix_local.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
Local test script to reproduce the issue from #1690.
This script demonstrates the fix for GenAI response_model=None.
"""

import instructor
from pydantic import BaseModel

# Example from the issue
# Model name and conversation reproduced from issue #1690.
model_name = "gemini-2.5-flash-lite-preview-06-17"
# OpenAI-style message list; the fix converts this to GenAI `contents`.
messages = [
    {
        "role": "user",
        "content": "What is the capital of France?"
    },
]

class MyModel(BaseModel):
    """Structured output schema used when a response_model is supplied."""

    # Expected answer field parsed from the model's response.
    answer: str

def test_with_response_model():
    """Exercise the path that already worked: parsing into a Pydantic model."""
    print("Testing with response_model...")

    provider = f"google/{model_name}"
    genai = instructor.from_provider(provider)
    result = genai.chat.completions.create(
        messages=messages, response_model=MyModel
    )
    print(f"Result: {result}")

def test_without_response_model():
    """Exercise the previously broken path: response_model=None (raw response)."""
    print("Testing with response_model=None...")

    provider = f"google/{model_name}"
    genai = instructor.from_provider(provider)
    result = genai.chat.completions.create(
        messages=messages, response_model=None
    )
    print(f"Result: {result}")

if __name__ == "__main__":
    print("Running GenAI response_model=None fix test...")

    # Run both scenarios via one loop instead of duplicating the
    # try/except/print boilerplate per test; output is unchanged.
    for check, label in (
        (test_with_response_model, "response_model"),
        (test_without_response_model, "response_model=None"),
    ):
        try:
            check()
            print(f"✓ {label} test passed")
        except Exception as e:
            print(f"✗ {label} test failed: {e}")
60 changes: 60 additions & 0 deletions tests/llm/test_genai/test_response_model_none.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
"""
Test cases for GenAI client with response_model=None.

This test verifies that the GenAI client properly handles the case when
response_model is set to None, ensuring that OpenAI-style messages are
correctly converted to GenAI-style contents.
"""

import pytest
from instructor.mode import Mode


@pytest.mark.parametrize("mode", [Mode.GENAI_TOOLS, Mode.GENAI_STRUCTURED_OUTPUTS])
def test_genai_response_model_none(genai_client, mode):
    """Test that GenAI client works with response_model=None"""
    from pydantic import BaseModel

    # Previously this raised "Models.generate_content() got an unexpected
    # keyword argument 'messages'" because OpenAI-style messages were never
    # converted into GenAI-style contents.
    conversation = [
        {"role": "user", "content": "What is the capital of France?"},
    ]

    response = genai_client.chat.completions.create(
        messages=conversation, response_model=None, mode=mode
    )

    # With response_model=None we expect the raw provider response back,
    # not a parsed Pydantic model.
    assert response is not None
    assert not isinstance(response, BaseModel)


def test_genai_response_model_none_with_system_message(genai_client):
    """Test that GenAI client works with response_model=None and system message"""
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ]

    # The system message must be extracted and routed into the GenAI
    # config rather than passed through as an OpenAI-style message.
    response = genai_client.chat.completions.create(
        messages=conversation, response_model=None, mode=Mode.GENAI_TOOLS
    )

    assert response is not None
Loading