@@ -152,3 +152,144 @@ async def test_invalid_model_handling():
     # Test invalid provider prefix
     with pytest.raises((BadRequestError, NotFoundError)):
         await llm("invalid-provider/gpt-4", "Hi", tokens=1)
+
+
+@pytest.mark.unit
+@pytest.mark.asyncio
+async def test_litellm_error_types():
+    """Test handling of specific LiteLLM error types."""
+    from unittest.mock import AsyncMock, patch
+
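+    # NOTE: patching "litellm.acompletion" assumes llm() resolves the function on
+    # the litellm module at call time; if the module under test does
+    # "from litellm import acompletion", patch that imported name instead.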
+    # Test AuthenticationError (401) - Invalid API key
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = AuthenticationError(
+            message="Invalid API key provided",
+            llm_provider="openai",
+            model="gpt-4o-mini"
+        )
+
+        with pytest.raises(AuthenticationError) as exc_info:
+            await llm("gpt-4o-mini", "Hi", tokens=1)
+
+        assert "Invalid API key" in str(exc_info.value)
+        assert hasattr(exc_info.value, "llm_provider")
+        assert exc_info.value.llm_provider == "openai"
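+        # LiteLLM normalizes provider-specific auth failures into a single
+        # OpenAI-style AuthenticationError carrying the llm_provider attribute.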
+
+    # Test NotFoundError (404) - Model not found
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = NotFoundError(
+            message="The model 'gpt-8' does not exist",
+            llm_provider="openai",
+            model="gpt-8"
+        )
+
+        with pytest.raises(NotFoundError) as exc_info:
+            await llm("gpt-8", "Hi", tokens=1)
+
+        assert "does not exist" in str(exc_info.value)
+
+    # Test RateLimitError (429) - Rate limit exceeded
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = RateLimitError(
+            message="Rate limit exceeded. Please retry after 60 seconds",
+            llm_provider="openai",
+            model="gpt-4o-mini"
+        )
+
+        with pytest.raises(RateLimitError) as exc_info:
+            await llm("gpt-4o-mini", "Hi", tokens=1)
+
+        assert "Rate limit" in str(exc_info.value)
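+        # Production callers typically retry a 429 with exponential backoff;
+        # here we only verify that the error propagates to the caller unchanged.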
+
+    # Test BadRequestError (400) - Invalid request
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = BadRequestError(
+            message="Invalid request parameters",
+            llm_provider="anthropic",
+            model="claude-sonnet-4"
+        )
+
+        with pytest.raises(BadRequestError) as exc_info:
+            await llm("claude-sonnet-4", "Hi", tokens=1)
+
+        assert "Invalid request" in str(exc_info.value)
+
+    # Test APIConnectionError (500) - Connection issues
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = APIConnectionError(
+            message="Failed to connect to API server",
+            llm_provider="ollama",
+            model="ollama/deepseek-r1"
+        )
+
+        with pytest.raises(APIConnectionError) as exc_info:
+            await llm("ollama/deepseek-r1", "Hi", tokens=1)
+
+        assert "Failed to connect" in str(exc_info.value)
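+        # APIConnectionError covers network-level failures (e.g. a local Ollama
+        # server that is not running) rather than an HTTP error from the provider.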
+
+
+@pytest.mark.unit
+@pytest.mark.asyncio
+async def test_context_window_exceeded_error():
+    """Test specific handling of context window exceeded errors."""
+    from unittest.mock import AsyncMock, patch
+    from litellm import ContextWindowExceededError
+
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = ContextWindowExceededError(
+            message="This model's maximum context length is 4096 tokens",
+            model="gpt-3.5-turbo",
+            llm_provider="openai"
+        )
+
+        with pytest.raises(ContextWindowExceededError) as exc_info:
+            await llm("gpt-3.5-turbo", "Very long prompt..." * 1000, tokens=1000)
+
+        assert "context length" in str(exc_info.value).lower()
+        # ContextWindowExceededError is a subclass of BadRequestError
+        assert isinstance(exc_info.value, BadRequestError)
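+        # A common recovery is to catch this error, truncate or chunk the
+        # prompt, and retry; the test only checks that it surfaces intact.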
+
+
+@pytest.mark.unit
+@pytest.mark.asyncio
+async def test_content_policy_violation_error():
+    """Test specific handling of content policy violation errors."""
+    from unittest.mock import AsyncMock, patch
+    from litellm import ContentPolicyViolationError
+
+    with patch('litellm.acompletion', new_callable=AsyncMock) as mock_completion:
+        mock_completion.side_effect = ContentPolicyViolationError(
+            message="Your request was rejected due to content policy violations",
+            model="gpt-4o-mini",
+            llm_provider="openai"
+        )
+
+        with pytest.raises(ContentPolicyViolationError) as exc_info:
+            await llm("gpt-4o-mini", "Inappropriate content", tokens=100)
+
+        assert "content policy" in str(exc_info.value).lower()
+        # ContentPolicyViolationError is a subclass of BadRequestError
+        assert isinstance(exc_info.value, BadRequestError)
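+
+
+# A minimal companion sketch: the subclass relationships noted in the comments
+# above can be asserted directly, confirming that a single BadRequestError
+# handler also catches context-window and content-policy failures.
+@pytest.mark.unit
+def test_bad_request_error_hierarchy():
+    """Both specialized 400-level errors inherit from BadRequestError."""
+    from litellm import ContentPolicyViolationError, ContextWindowExceededError
+
+    assert issubclass(ContextWindowExceededError, BadRequestError)
+    assert issubclass(ContentPolicyViolationError, BadRequestError)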