1+ """
2+ Comprehensive test suite for Issue #467: base_url to api_base mapping for litellm compatibility
3+
4+ This test ensures that when users provide 'base_url' in their llm dictionary,
5+ it properly maps to 'api_base' for litellm, enabling OpenAI-compatible endpoints
6+ like KoboldCPP to work correctly.
7+ """
8+
9+ import pytest
10+ import sys
11+ import os
12+ from unittest .mock import Mock , patch , MagicMock
13+ from typing import Dict , Any
14+
15+ # Add the source path to sys.path for imports
16+ sys .path .insert (0 , os .path .join (os .path .dirname (__file__ ), '..' , '..' , 'src' , 'praisonai-agents' ))
17+
18+ try :
19+ from praisonaiagents .agent .agent import Agent
20+ from praisonaiagents .llm .llm import LLM
21+ from praisonaiagents .agent .image_agent import ImageAgent
22+ except ImportError as e :
23+ pytest .skip (f"Could not import required modules: { e } " , allow_module_level = True )
24+
25+
26+ class TestBaseUrlApiBaseMapping :
27+ """Test suite for base_url to api_base parameter mapping in litellm integration."""
28+
29+ def test_llm_class_maps_base_url_to_api_base (self ):
30+ """Test that LLM class properly maps base_url to api_base for litellm."""
31+ with patch ('praisonaiagents.llm.llm.litellm' ) as mock_litellm :
32+ mock_litellm .completion .return_value = {
33+ 'choices' : [{'message' : {'content' : 'Test response' }}]
34+ }
35+
36+ llm = LLM (
37+ model = 'openai/mistral' ,
38+ base_url = 'http://localhost:4000' ,
39+ api_key = 'sk-test'
40+ )
41+
42+ # Trigger a completion to see the parameters passed to litellm
43+ llm .chat ([{'role' : 'user' , 'content' : 'test' }])
44+
45+ # Verify litellm.completion was called with both base_url and api_base
46+ call_args = mock_litellm .completion .call_args
47+ assert call_args is not None , "litellm.completion should have been called"
48+
49+ # Check that both parameters are present
50+ kwargs = call_args [1 ]
51+ assert 'base_url' in kwargs , "base_url should be passed to litellm"
52+ assert 'api_base' in kwargs , "api_base should be passed to litellm"
53+ assert kwargs ['base_url' ] == 'http://localhost:4000'
54+ assert kwargs ['api_base' ] == 'http://localhost:4000'
55+
56+ def test_agent_with_llm_dict_base_url_parameter (self ):
57+ """Test that Agent properly handles base_url in llm dictionary - Issue #467 scenario."""
58+ llm_config = {
59+ 'model' : 'openai/mistral' ,
60+ 'base_url' : 'http://localhost:4000' , # This is the key parameter from the issue
61+ 'api_key' : 'sk-1234'
62+ }
63+
64+ with patch ('praisonaiagents.llm.llm.litellm' ) as mock_litellm :
65+ mock_litellm .completion .return_value = {
66+ 'choices' : [{'message' : {'content' : 'Test response' }}]
67+ }
68+
69+ agent = Agent (
70+ name = "Test Agent" ,
71+ llm = llm_config
72+ )
73+
74+ # Execute a simple task to trigger LLM usage
75+ with patch .object (agent , 'execute_task' ) as mock_execute :
76+ mock_execute .return_value = "Task completed"
77+ result = agent .execute_task ("Test task" )
78+
79+ # Verify the agent was created successfully
80+ assert agent .name == "Test Agent"
81+ assert agent .llm is not None
82+ assert isinstance (agent .llm , LLM )
83+ assert agent .llm .base_url == 'http://localhost:4000'
84+
85+ def test_image_agent_base_url_consistency (self ):
86+ """Test that ImageAgent maintains parameter consistency with base_url."""
87+ with patch ('praisonaiagents.agent.image_agent.litellm' ) as mock_litellm :
88+ mock_litellm .image_generation .return_value = {
89+ 'data' : [{'url' : 'http://example.com/image.png' }]
90+ }
91+
92+ image_agent = ImageAgent (
93+ base_url = 'http://localhost:4000' ,
94+ api_key = 'sk-test'
95+ )
96+
97+ # Generate an image to trigger the API call
98+ result = image_agent .generate_image ("test prompt" )
99+
100+ # Verify litellm.image_generation was called with proper parameters
101+ call_args = mock_litellm .image_generation .call_args
102+ assert call_args is not None
103+
104+ kwargs = call_args [1 ]
105+ # Check that base_url is mapped to api_base for image generation
106+ assert 'api_base' in kwargs or 'base_url' in kwargs , "Either api_base or base_url should be present"
107+
108+ def test_koboldcpp_specific_scenario (self ):
109+ """Test the specific KoboldCPP scenario mentioned in Issue #467."""
110+ KOBOLD_V1_BASE_URL = "http://127.0.0.1:5001/v1"
111+ CHAT_MODEL_NAME = "koboldcpp-model"
112+
113+ llm_config = {
114+ 'model' : f'openai/{ CHAT_MODEL_NAME } ' ,
115+ 'base_url' : KOBOLD_V1_BASE_URL ,
116+ 'api_key' : "sk-1234"
117+ }
118+
119+ with patch ('praisonaiagents.llm.llm.litellm' ) as mock_litellm :
120+ # Mock successful response (not OpenAI key error)
121+ mock_litellm .completion .return_value = {
122+ 'choices' : [{'message' : {'content' : 'KoboldCPP response' }}]
123+ }
124+
125+ llm = LLM (** llm_config )
126+
127+ # This should not raise an OpenAI key error
128+ response = llm .chat ([{'role' : 'user' , 'content' : 'test' }])
129+
130+ # Verify the call was made with correct parameters
131+ call_args = mock_litellm .completion .call_args [1 ]
132+ assert call_args ['model' ] == f'openai/{ CHAT_MODEL_NAME } '
133+ assert call_args ['api_base' ] == KOBOLD_V1_BASE_URL
134+ assert call_args ['base_url' ] == KOBOLD_V1_BASE_URL
135+ assert call_args ['api_key' ] == "sk-1234"
136+
137+ def test_litellm_documentation_example_compatibility (self ):
138+ """Test compatibility with the litellm documentation example from Issue #467."""
139+ # This is the exact example from litellm docs mentioned in the issue
140+ with patch ('praisonaiagents.llm.llm.litellm' ) as mock_litellm :
141+ mock_litellm .completion .return_value = {
142+ 'choices' : [{'message' : {'content' : 'Documentation example response' }}]
143+ }
144+
145+ llm = LLM (
146+ model = "openai/mistral" ,
147+ api_key = "sk-1234" ,
148+ base_url = "http://0.0.0.0:4000" # This should map to api_base
149+ )
150+
151+ response = llm .chat ([{
152+ "role" : "user" ,
153+ "content" : "Hey, how's it going?" ,
154+ }])
155+
156+ # Verify the parameters match litellm expectations
157+ call_args = mock_litellm .completion .call_args [1 ]
158+ assert call_args ['model' ] == "openai/mistral"
159+ assert call_args ['api_key' ] == "sk-1234"
160+ assert call_args ['api_base' ] == "http://0.0.0.0:4000"
161+
162+ def test_backward_compatibility_with_api_base (self ):
163+ """Test that existing code using api_base still works."""
164+ with patch ('praisonaiagents.llm.llm.litellm' ) as mock_litellm :
165+ mock_litellm .completion .return_value = {
166+ 'choices' : [{'message' : {'content' : 'Backward compatibility response' }}]
167+ }
168+
169+ # Test direct api_base parameter (if supported)
170+ llm_config = {
171+ 'model' : 'openai/test' ,
172+ 'api_key' : 'sk-test'
173+ }
174+
175+ # If the LLM class has an api_base parameter, test it
176+ try :
177+ llm_config ['api_base' ] = 'http://localhost:4000'
178+ llm = LLM (** llm_config )
179+ response = llm .chat ([{'role' : 'user' , 'content' : 'test' }])
180+ except TypeError :
181+ # If api_base is not a direct parameter, that's fine
182+ # The important thing is that base_url works
183+ pass
184+
185+ def test_ollama_environment_variable_compatibility (self ):
186+ """Test Ollama compatibility with OLLAMA_API_BASE environment variable."""
187+ with patch .dict (os .environ , {'OLLAMA_API_BASE' : 'http://localhost:11434' }):
188+ with patch ('praisonaiagents.llm.llm.litellm' ) as mock_litellm :
189+ mock_litellm .completion .return_value = {
190+ 'choices' : [{'message' : {'content' : 'Ollama response' }}]
191+ }
192+
193+ llm = LLM (
194+ model = 'ollama/llama2' ,
195+ api_key = 'not-needed-for-ollama'
196+ )
197+
198+ response = llm .chat ([{'role' : 'user' , 'content' : 'test' }])
199+
200+ # Should work without errors when environment variable is set
201+ assert response is not None
202+
203+
204+ if __name__ == '__main__' :
205+ # Run the tests
206+ pytest .main ([__file__ , '-v' ])
0 commit comments