-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
fix: Implement Ollama sequential tool calling fix #879
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -277,15 +277,21 @@ def _is_ollama_provider(self) -> bool: | |
| # Direct ollama/ prefix | ||
| if self.model.startswith("ollama/"): | ||
| return True | ||
|
|
||
| # Check base_url if provided | ||
| if self.base_url and "ollama" in self.base_url.lower(): | ||
| return True | ||
|
|
||
| # Check environment variables for Ollama base URL | ||
| base_url = os.getenv("OPENAI_BASE_URL", "") | ||
| api_base = os.getenv("OPENAI_API_BASE", "") | ||
|
|
||
| # Common Ollama endpoints | ||
| ollama_endpoints = ["localhost:11434", "127.0.0.1:11434", ":11434"] | ||
| # Common Ollama endpoints (including custom ports) | ||
| if any(url and ("ollama" in url.lower() or ":11434" in url) | ||
| for url in [base_url, api_base, self.base_url or ""]): | ||
| return True | ||
|
|
||
| return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints) | ||
| return False | ||
|
|
||
| def _process_stream_delta(self, delta, response_text: str, tool_calls: List[Dict], formatted_tools: Optional[List] = None) -> tuple: | ||
| """ | ||
|
|
@@ -812,11 +818,20 @@ def get_response( | |
| if tool_calls and execute_tool_fn: | ||
| # Convert tool_calls to a serializable format for all providers | ||
| serializable_tool_calls = self._serialize_tool_calls(tool_calls) | ||
| messages.append({ | ||
| "role": "assistant", | ||
| "content": response_text, | ||
| "tool_calls": serializable_tool_calls | ||
| }) | ||
| # Check if this is Ollama provider | ||
| if self._is_ollama_provider(): | ||
| # For Ollama, only include role and content | ||
| messages.append({ | ||
| "role": "assistant", | ||
| "content": response_text | ||
| }) | ||
| else: | ||
| # For other providers, include tool_calls | ||
| messages.append({ | ||
| "role": "assistant", | ||
| "content": response_text, | ||
| "tool_calls": serializable_tool_calls | ||
| }) | ||
|
|
||
| should_continue = False | ||
| tool_results = [] # Store all tool results | ||
|
|
@@ -842,11 +857,21 @@ def get_response( | |
| logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}") | ||
| display_tool_call(display_message, console=console) | ||
|
|
||
| messages.append({ | ||
| "role": "tool", | ||
| "tool_call_id": tool_call_id, | ||
| "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output" | ||
| }) | ||
| # Check if this is Ollama provider | ||
| if self._is_ollama_provider(): | ||
| # For Ollama, use user role and format as natural language | ||
| tool_result_content = json.dumps(tool_result) if tool_result is not None else "an empty output" | ||
| messages.append({ | ||
| "role": "user", | ||
| "content": f"The {function_name} function returned: {tool_result_content}" | ||
| }) | ||
| else: | ||
| # For other providers, use tool role with tool_call_id | ||
| messages.append({ | ||
| "role": "tool", | ||
| "tool_call_id": tool_call_id, | ||
| "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output" | ||
| }) | ||
|
|
||
| # Check if we should continue (for tools like sequential thinking) | ||
| # This mimics the logic from agent.py lines 1004-1007 | ||
|
|
@@ -1303,11 +1328,20 @@ async def get_response_async( | |
| if tools and execute_tool_fn and tool_calls: | ||
| # Convert tool_calls to a serializable format for all providers | ||
| serializable_tool_calls = self._serialize_tool_calls(tool_calls) | ||
| messages.append({ | ||
| "role": "assistant", | ||
| "content": response_text, | ||
| "tool_calls": serializable_tool_calls | ||
| }) | ||
| # Check if it's Ollama provider | ||
| if self._is_ollama_provider(): | ||
| # For Ollama, only include role and content | ||
| messages.append({ | ||
| "role": "assistant", | ||
| "content": response_text | ||
| }) | ||
| else: | ||
| # For other providers, include tool_calls | ||
| messages.append({ | ||
| "role": "assistant", | ||
| "content": response_text, | ||
| "tool_calls": serializable_tool_calls | ||
| }) | ||
|
Comment on lines
+1331
to
+1344
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This logic is a duplicate of the one found in the synchronous `get_response` method. # Create the base assistant message
assistant_message = {
"role": "assistant",
"content": response_text,
}
# Add tool_calls for non-Ollama providers
if not self._is_ollama_provider():
assistant_message["tool_calls"] = serializable_tool_calls
messages.append(assistant_message) |
||
|
|
||
| tool_results = [] # Store all tool results | ||
| for tool_call in tool_calls: | ||
|
|
@@ -1325,11 +1359,21 @@ async def get_response_async( | |
| else: | ||
| display_message += "Function returned no output" | ||
| display_tool_call(display_message, console=console) | ||
| messages.append({ | ||
| "role": "tool", | ||
| "tool_call_id": tool_call_id, | ||
| "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output" | ||
| }) | ||
| # Check if it's Ollama provider | ||
| if self._is_ollama_provider(): | ||
| # For Ollama, use user role and natural language format | ||
| content = f"The {function_name} function returned: {json.dumps(tool_result) if tool_result is not None else 'an empty output'}" | ||
| messages.append({ | ||
| "role": "user", | ||
| "content": content | ||
| }) | ||
| else: | ||
| # For other providers, use tool role with tool_call_id | ||
| messages.append({ | ||
| "role": "tool", | ||
| "tool_call_id": tool_call_id, | ||
| "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output" | ||
| }) | ||
|
|
||
| # Get response after tool calls | ||
| response_text = "" | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This block for appending the assistant message introduces code duplication. It can be simplified by constructing a base message dictionary and then conditionally adding the `tool_calls` key for non-Ollama providers. This approach enhances conciseness and maintainability.