
Commit d09d624

Im An AI and ryaneggz authored
FROM feat/758-remove-per-chat-model-select TO development (#767)
* feat: US-002 - Create read-only ModelBadge component to display current model
* feat: US-003 - Replace SelectModel with ModelBadge in ChatNav
* feat: US-004 - Remove SelectModel from NoAuthLayout
* feat: US-005 - Update useModel hook to use settings default model consistently
  - Replace useQueryState('model') URL param sync with plain useState
  - Model now resolves from user settings default > system default
  - Remove nuqs dependency from useModel (no more URL query param pollution)
  - Keep setModel for internal use (thread/agent loading)
  - Keep updateQueryStateModel for agent-create-form compatibility
  - Add useCallback wrapper for stable setModel reference
  - Add 9 unit tests covering defaults, fallbacks, reset, and no-URL-param behavior
* feat: US-006 - Ensure request payload uses settings default model consistently
* feat: US-007 - Clean up unused SelectModel imports and dead code
* feat: US-008 - Add integration tests and open PR
  - Added 7 integration tests covering:
    - ChatNav shows ModelBadge, not SelectModel
    - ModelBadge is non-interactive (no buttons/inputs)
    - Settings DefaultModelSettings still exists
    - SelectModel still available for agent create form
    - NoAuthLayout no longer references SelectModel
  - All 228 tests pass, typecheck clean, build succeeds
* Model driven by settings page selection and backend user default

---------

Co-authored-by: ryaneggz <kre8mymedia@gmail.com>
1 parent 0fcea05 commit d09d624

29 files changed: 1,413 additions & 200 deletions

backend/src/agents/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ def init_graph(
     tools: list[BaseTool] = [],
     subagents: list[SubAgent] = [],
     system_prompt: str = None,
-    model: str = DEFAULT_CHAT_MODEL,
+    model: str | None = None,
     context_schema: Type[ContextSchema] | None = None,
     checkpointer: BaseCheckpointSaver | None = None,
     store: BaseStore | None = None,
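
The signature change swaps a baked-in default for a None sentinel, so "caller did not pick a model" stays observable until user settings can be consulted at request time. A minimal sketch of the pattern (the constant's value is an assumption for illustration, not the project's actual default):

# Hypothetical illustration of the None-sentinel default; not project code.
DEFAULT_CHAT_MODEL = "openai:gpt-4o"  # assumed value, for illustration only

def init_graph_sketch(model: str | None = None) -> str:
    # None keeps "unspecified" distinguishable from an explicit choice,
    # so resolution can defer to user settings instead of a frozen default.
    return model if model is not None else DEFAULT_CHAT_MODEL

assert init_graph_sketch() == DEFAULT_CHAT_MODEL
assert init_graph_sketch("anthropic:claude-sonnet-4") == "anthropic:claude-sonnet-4"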

backend/src/controllers/llm.py

Lines changed: 8 additions & 0 deletions
@@ -17,6 +17,7 @@
 from src.utils.llm import resolve_api_key
 from src.utils.logger import logger
 from src.utils.format import get_time
+from src.constants.llm import DEFAULT_CHAT_MODEL
 
 
 class LLMController:
@@ -77,6 +78,9 @@ async def _resolve_user_settings(self, model: str) -> tuple[str, str | None]:
         and api_key is the resolved key for the provider.
         """
         if not self.user_id:
+            # Unauthenticated users: fall back to system default if no model
+            if not model:
+                model = DEFAULT_CHAT_MODEL
             return model, None
 
         settings_repo = UserSettingsRepo(self.user_id, self.store)
@@ -87,6 +91,10 @@ async def _resolve_user_settings(self, model: str) -> tuple[str, str | None]:
         if not model and settings.default_model:
             model = settings.default_model
 
+        # Final fallback to system default
+        if not model:
+            model = DEFAULT_CHAT_MODEL
+
         # Guard against None model before resolving API key
         if not model:
             return model, None
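
Taken together, these additions give _resolve_user_settings a three-tier precedence: the request's explicit model, then the user's saved default_model, then the system-wide DEFAULT_CHAT_MODEL. A self-contained sketch of that ordering (the function name and model strings are illustrative, not project code):

# Hypothetical condensation of the fallback chain shown in the diff above.
DEFAULT_CHAT_MODEL = "openai:gpt-4o"  # assumed value, for illustration only

def resolve_model(requested: str | None, user_default: str | None) -> str:
    if requested:              # 1) explicit model on the request wins
        return requested
    if user_default:           # 2) otherwise the user's saved default
        return user_default
    return DEFAULT_CHAT_MODEL  # 3) finally the system default

assert resolve_model("openai:gpt-4o", "anthropic:claude-sonnet-4") == "openai:gpt-4o"
assert resolve_model("", "anthropic:claude-sonnet-4") == "anthropic:claude-sonnet-4"
assert resolve_model(None, None) == DEFAULT_CHAT_MODEL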

backend/src/schemas/entities/llm.py

Lines changed: 1 addition & 1 deletion
@@ -200,7 +200,7 @@ def serialize_dt(self, dt: Optional[datetime], _):
 
 class LLMRequest(BaseModel):
     input: LLMInput
-    model: Optional[str] = Field(default=DEFAULT_CHAT_MODEL)
+    model: Optional[str] = Field(default=None)
     system_prompt: Optional[str] = Field(default=DEFAULT_SYSTEM_PROMPT, exclude=True)
     instructions: Optional[str] = Field(default="", exclude=True)
     tools: Optional[List[Any]] = Field(default_factory=list)
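
With default=None, a payload that omits model no longer bakes DEFAULT_CHAT_MODEL into the request; the server-side resolver decides instead. A minimal sketch of the effect, assuming Pydantic is installed (Req is a trimmed stand-in for the real LLMRequest):

from typing import Optional

from pydantic import BaseModel, Field

class Req(BaseModel):
    # Stand-in for LLMRequest; the real schema carries many more fields.
    model: Optional[str] = Field(default=None)

assert Req().model is None                                  # omitted: server resolves it
assert Req(model="openai:gpt-4o").model == "openai:gpt-4o"  # explicit choice preserved
assert Req(model="").model == ""                            # empty string kept, treated as falsy downstream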

backend/src/workers/tasks.py

Lines changed: 21 additions & 0 deletions
@@ -213,6 +213,26 @@ async def _execute_agent_stream(
     # Get assistant config if needed
     params = await service_context.llm_service.assistant(params)
 
+    # Resolve user default model and API key (mirrors LLMController._resolve_user_settings)
+    from src.repos.user_settings_repo import UserSettingsRepo
+    from src.utils.llm import resolve_api_key
+    from src.constants.llm import DEFAULT_CHAT_MODEL
+
+    api_key = None
+    if user_id:
+        settings_repo = UserSettingsRepo(user_id, service_context.store)
+        settings = await settings_repo._get_or_create()
+        user_keys = settings_repo._decrypt_keys(settings)
+        if not params.model and settings.default_model:
+            params.model = settings.default_model
+        if not params.model:
+            params.model = DEFAULT_CHAT_MODEL
+        if params.model:
+            api_key = resolve_api_key(params.model, user_keys if user_keys else None)
+    else:
+        if not params.model:
+            params.model = DEFAULT_CHAT_MODEL
+
     # Load user memories and merge into files_map
     memory_files, memory_sources = await prepare_memory_files(
         user_id, service_context.memory_service
@@ -246,6 +266,7 @@ async def _execute_agent_stream(
         service_context=service_context,
         backend=backend,
         memory=memory_sources,
+        api_key=api_key,
     )
     params.input.messages[-1].model = agent.model
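
Because this block mirrors LLMController._resolve_user_settings by design, the duplicated fallback logic could be hoisted into one shared helper. A hypothetical sketch built only from calls the diff already uses (the helper's name and placement are assumptions, not part of this commit):

from src.constants.llm import DEFAULT_CHAT_MODEL
from src.repos.user_settings_repo import UserSettingsRepo
from src.utils.llm import resolve_api_key

async def resolve_model_and_key(model, user_id, store):
    """Apply explicit > user default > system default, then resolve the API key."""
    if not user_id:
        return model or DEFAULT_CHAT_MODEL, None  # unauthenticated: no user key
    repo = UserSettingsRepo(user_id, store)
    settings = await repo._get_or_create()
    user_keys = repo._decrypt_keys(settings)
    model = model or settings.default_model or DEFAULT_CHAT_MODEL
    return model, resolve_api_key(model, user_keys or None)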

Lines changed: 139 additions & 0 deletions
@@ -0,0 +1,139 @@
+"""Unit tests for LLM controller model resolution logic.
+
+Tests that _resolve_user_settings correctly applies:
+1. Explicit model passes through unchanged
+2. Empty model resolves to user default
+3. No user default falls back to DEFAULT_CHAT_MODEL
+4. Unauthenticated user gets DEFAULT_CHAT_MODEL
+"""
+
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+from src.controllers.llm import LLMController
+from src.constants.llm import DEFAULT_CHAT_MODEL
+
+
+class FakeSettings:
+    """Minimal stand-in for user settings."""
+
+    def __init__(self, default_model=None):
+        self.default_model = default_model
+
+
+@pytest.fixture
+def mock_store():
+    return MagicMock()
+
+
+@pytest.fixture
+def mock_config():
+    return {"configurable": {}, "metadata": {}}
+
+
+class TestResolveUserSettings:
+    """Test _resolve_user_settings model resolution."""
+
+    @pytest.mark.asyncio
+    async def test_explicit_model_passes_through(self, mock_store, mock_config):
+        """When client sends an explicit model, it should be used as-is."""
+        controller = LLMController(user_id="user-1", store=mock_store, config=mock_config)
+
+        with patch.object(controller, "_resolve_user_settings", wraps=controller._resolve_user_settings):
+            with patch("src.controllers.llm.UserSettingsRepo") as MockRepo:
+                instance = MockRepo.return_value
+                instance._get_or_create = AsyncMock(
+                    return_value=FakeSettings(default_model="anthropic:claude-sonnet-4")
+                )
+                instance._decrypt_keys = MagicMock(return_value={})
+
+                model, api_key = await controller._resolve_user_settings("openai:gpt-4o")
+
+        assert model == "openai:gpt-4o"
+
+    @pytest.mark.asyncio
+    async def test_empty_model_resolves_to_user_default(self, mock_store, mock_config):
+        """When client sends empty model, user's default_model should be used."""
+        controller = LLMController(user_id="user-1", store=mock_store, config=mock_config)
+
+        with patch("src.controllers.llm.UserSettingsRepo") as MockRepo:
+            instance = MockRepo.return_value
+            instance._get_or_create = AsyncMock(
+                return_value=FakeSettings(default_model="anthropic:claude-sonnet-4")
+            )
+            instance._decrypt_keys = MagicMock(return_value={})
+
+            model, _ = await controller._resolve_user_settings("")
+
+        assert model == "anthropic:claude-sonnet-4"
+
+    @pytest.mark.asyncio
+    async def test_no_user_default_falls_back_to_system(self, mock_store, mock_config):
+        """When user has no default_model, system DEFAULT_CHAT_MODEL should be used."""
+        controller = LLMController(user_id="user-1", store=mock_store, config=mock_config)
+
+        with patch("src.controllers.llm.UserSettingsRepo") as MockRepo:
+            instance = MockRepo.return_value
+            instance._get_or_create = AsyncMock(
+                return_value=FakeSettings(default_model=None)
+            )
+            instance._decrypt_keys = MagicMock(return_value={})
+
+            model, _ = await controller._resolve_user_settings("")
+
+        assert model == DEFAULT_CHAT_MODEL
+
+    @pytest.mark.asyncio
+    async def test_unauthenticated_user_gets_system_default(self, mock_store, mock_config):
+        """Unauthenticated users (no user_id) should get DEFAULT_CHAT_MODEL."""
+        controller = LLMController(user_id=None, store=mock_store, config=mock_config)
+
+        model, api_key = await controller._resolve_user_settings("")
+
+        assert model == DEFAULT_CHAT_MODEL
+        assert api_key is None
+
+    @pytest.mark.asyncio
+    async def test_unauthenticated_user_explicit_model_preserved(self, mock_store, mock_config):
+        """Unauthenticated users with explicit model should keep it."""
+        controller = LLMController(user_id=None, store=mock_store, config=mock_config)
+
+        model, api_key = await controller._resolve_user_settings("openai:gpt-4o")
+
+        assert model == "openai:gpt-4o"
+        assert api_key is None
+
+
+class TestLLMRequestModelDefault:
+    """Test that LLMRequest.model defaults to None."""
+
+    def test_model_defaults_to_none(self):
+        """LLMRequest without model field should default to None."""
+        from src.schemas.entities.llm import LLMRequest
+
+        payload = {
+            "input": {"messages": [{"role": "user", "content": "Hello"}]},
+        }
+        request = LLMRequest(**payload)
+        assert request.model is None
+
+    def test_model_explicit_value_preserved(self):
+        """LLMRequest with explicit model should preserve it."""
+        from src.schemas.entities.llm import LLMRequest
+
+        payload = {
+            "input": {"messages": [{"role": "user", "content": "Hello"}]},
+            "model": "openai:gpt-4o",
+        }
+        request = LLMRequest(**payload)
+        assert request.model == "openai:gpt-4o"
+
+    def test_model_empty_string_preserved(self):
+        """LLMRequest with empty string model should preserve it."""
+        from src.schemas.entities.llm import LLMRequest
+
+        payload = {
+            "input": {"messages": [{"role": "user", "content": "Hello"}]},
+            "model": "",
+        }
+        request = LLMRequest(**payload)
+        assert request.model == ""
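
These tests rely on @pytest.mark.asyncio, so pytest-asyncio must be installed for the async cases to run. A minimal programmatic runner, with the test path left as an assumption since the new file's name is not shown above:

# Sketch only: adjust the path to wherever this test module lives.
import pytest

if __name__ == "__main__":
    raise SystemExit(pytest.main(["-q", "backend/tests/"]))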
