Skip to content

Commit b69e6f3

Browse files
jinsoojinsoo
authored and committed
add: add test_failover.py to test failover openai->gemini
1 parent 7b41081 commit b69e6f3

File tree

1 file changed

+26
-0
lines changed

1 file changed

+26
-0
lines changed

tests/test_failover.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# tests/test_failover.py (예시)
2+
import pytest
3+
from unittest.mock import patch, MagicMock
4+
from openai import RateLimitError
5+
6+
def test_failover_on_429():
    """Verify the router fails over to Gemini when OpenAI returns HTTP 429.

    The OpenAI provider is replaced with sync/async stubs that always raise
    ``RateLimitError``; the router must then fall back to the next-tier
    provider (Gemini) and return its response.

    NOTE(review): this hits the real Gemini API through ``Config``
    credentials — it is an integration test, not a unit test, and will
    fail offline or without a valid ``GEMINI_API_KEY``.
    """
    from src.llm_failover_router import (
        LLMFailoverRouter,
        Provider,
        build_gemini_provider,
    )
    from src.config import Config

    def _rate_limit_error() -> RateLimitError:
        # The openai SDK's RateLimitError (an APIStatusError subclass)
        # requires BOTH `response` and `body` keyword arguments; omitting
        # `body` raises TypeError at construction time instead of the
        # intended 429, so the failover path would never be exercised.
        return RateLimitError(
            "rate_limit",
            response=MagicMock(status_code=429),
            body=None,
        )

    # Stub the OpenAI provider so every call — sync or async — raises 429.
    def failing_openai_sync(*, messages, **kwargs):
        raise _rate_limit_error()

    async def failing_openai_async(*, messages, **kwargs):
        raise _rate_limit_error()

    openai_provider = Provider(
        "openai", failing_openai_sync, failing_openai_async, tier=0
    )
    gemini_provider = build_gemini_provider(
        Config.GEMINI_API_KEY, Config.GEMINI_MODEL
    )

    router = LLMFailoverRouter(providers=[openai_provider, gemini_provider])
    result = router.chat(messages=[{"role": "user", "content": "hi"}])
    assert result  # success iff a Gemini response came back after failover
24+
25+
# Allow running this file directly (python tests/test_failover.py)
# in addition to pytest discovery.
if __name__ == "__main__":
    test_failover_on_429()

0 commit comments

Comments
 (0)