Skip to content

Commit af685c9

Browse files
authored
Refactor: Chat class and Integration test (#70)
* refactor: remove the bind_tools override, add disabled_params
* test: has_tool_choice returns False
* style: fix lint
1 parent 9f769ed commit af685c9

File tree

2 files changed

+22
-74
lines changed

2 files changed

+22
-74
lines changed

libs/upstage/langchain_upstage/chat_models.py

Lines changed: 5 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,8 @@
33
import os
44
from typing import (
55
Any,
6-
Callable,
76
Dict,
87
List,
9-
Literal,
108
Mapping,
119
Optional,
1210
Sequence,
@@ -18,15 +16,11 @@
1816
AsyncCallbackManagerForLLMRun,
1917
CallbackManagerForLLMRun,
2018
)
21-
from langchain_core.language_models import LanguageModelInput
2219
from langchain_core.language_models.chat_models import LangSmithParams
23-
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
20+
from langchain_core.messages import BaseMessage, HumanMessage
2421
from langchain_core.messages.utils import convert_to_openai_messages
2522
from langchain_core.outputs import ChatResult
26-
from langchain_core.runnables import Runnable
27-
from langchain_core.tools import BaseTool
2823
from langchain_core.utils import from_env, secret_from_env
29-
from langchain_core.utils.function_calling import convert_to_openai_tool
3024
from langchain_openai.chat_models.base import BaseChatOpenAI
3125
from pydantic import Field, SecretStr, model_validator
3226
from tokenizers import Tokenizer
@@ -125,6 +119,10 @@ def _get_ls_params(
125119
default_headers: Union[Mapping[str, str], None] = DEFAULT_HEADERS
126120
"""add trace header."""
127121

122+
disabled_params: dict[str, Any] = Field(
123+
default_factory=lambda: {"parallel_tool_calls": None}
124+
)
125+
128126
@model_validator(mode="after")
129127
def validate_environment(self) -> Self:
130128
"""Validate that api key and python package exists in environment."""
@@ -269,69 +267,3 @@ def _parse_documents(self, file_path: str) -> str:
269267
file_title = file_titles[min(i, len(file_titles) - 1)]
270268
document_contents += f"{file_title}:\n{doc.page_content}\n\n"
271269
return document_contents
272-
273-
def bind_tools(
274-
self,
275-
tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
276-
*,
277-
tool_choice: Optional[Union[dict, str, Literal["auto"], bool]] = None,
278-
**kwargs: Any,
279-
) -> Runnable[LanguageModelInput, AIMessage]:
280-
"""Bind tool-like objects to this chat model.
281-
282-
Assumes model is compatible with Upstage tool-calling API.
283-
284-
Args:
285-
tools: A list of tool definitions to bind to this chat model.
286-
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
287-
models, callables, and BaseTools will be automatically converted to
288-
their schema dictionary representation.
289-
tool_choice: Which tool to require the model to call.
290-
Options are:
291-
name of the tool (str): calls corresponding tool;
292-
"auto": automatically selects a tool (including no tool);
293-
"none": does not call a tool;
294-
True: forces tool call (requires `tools` be length 1);
295-
False: no effect;
296-
or a dict of the form:
297-
{"type": "function", "function": {"name": <<tool_name>>}}.
298-
**kwargs: Any additional parameters to pass to the
299-
:class:`~langchain.runnable.Runnable` constructor.
300-
"""
301-
302-
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
303-
if tool_choice:
304-
if isinstance(tool_choice, str):
305-
# tool_choice is a tool/function name
306-
if tool_choice in ("any", "required", "auto"):
307-
tool_choice = "auto"
308-
elif tool_choice == "none":
309-
tool_choice = "none"
310-
else:
311-
tool_choice = {
312-
"type": "function",
313-
"function": {"name": tool_choice},
314-
}
315-
316-
elif isinstance(tool_choice, bool):
317-
tool_choice = "auto"
318-
elif isinstance(tool_choice, dict):
319-
tool_names = [
320-
formatted_tool["function"]["name"]
321-
for formatted_tool in formatted_tools
322-
]
323-
if not any(
324-
tool_name == tool_choice["function"]["name"]
325-
for tool_name in tool_names
326-
):
327-
raise ValueError(
328-
f"Tool choice {tool_choice} was specified, but the only "
329-
f"provided tools were {tool_names}."
330-
)
331-
else:
332-
raise ValueError(
333-
f"Unrecognized tool_choice type. Expected str, bool or dict. "
334-
f"Received: {tool_choice}"
335-
)
336-
kwargs["tool_choice"] = tool_choice
337-
return super().bind(tools=formatted_tools, **kwargs)

libs/upstage/tests/integration_tests/test_chat_models_standard.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,25 @@ def chat_model_class(self) -> Type[BaseChatModel]:
1717
@property
1818
def chat_model_params(self) -> dict:
1919
return {
20-
"model": "solar-mini",
20+
"model": "solar-pro2",
2121
}
2222

23+
@property
24+
def has_tool_choice(self) -> bool:
25+
"""Upstage API tool_choice support status.
26+
27+
Note: The Upstage API does support tool_choice parameters and actually
28+
calls tools correctly. However, there is a known bug where the API
29+
returns an incorrect finish_reason value ('stop' instead of 'tool_calls')
30+
when tool_choice is set to 'required' or a specific tool name.
31+
32+
The API team is aware of this issue and is currently working on a fix.
33+
Once fixed, this property should be changed to return True.
34+
35+
Therefore, we skip the test_tool_choice test until the API bug is resolved.
36+
"""
37+
return False
38+
2339
@pytest.mark.xfail(reason="Not implemented.")
2440
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
2541
super().test_usage_metadata_streaming(model)

0 commit comments

Comments (0)