-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.py
More file actions
98 lines (79 loc) · 4.78 KB
/
config.py
File metadata and controls
98 lines (79 loc) · 4.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
"""
config.py
─────────
Central configuration loader.
All settings are read from environment variables / .env file.
No credentials are ever hardcoded here.
"""
import os
from pathlib import Path
from urllib.parse import quote_plus

from dotenv import load_dotenv
# ── Environment bootstrap ──────────────────────────────────────────────────
# Settings may live in a .env file sitting next to this module; load it
# before any os.getenv() lookups below run.  Missing file is a no-op.
BASE_DIR = Path(__file__).resolve().parent
_DOTENV_PATH = BASE_DIR / ".env"
load_dotenv(_DOTENV_PATH)
def _require(key: str) -> str:
    """Look up *key* in the environment, failing loudly when it is absent.

    An empty string counts as "not set" — credentials must be non-empty.

    Raises:
        EnvironmentError: if the variable is unset or empty.
    """
    value = os.getenv(key)
    if value:
        return value
    raise EnvironmentError(
        f"Required environment variable '{key}' is not set.\n"
        f"Copy .env.example to .env and fill in your values."
    )
def _get(key: str, default: str = "") -> str:
    """Return the env-var value for *key*, or *default* when it is unset.

    Unlike :func:`_require`, an empty string set in the environment is
    returned as-is; *default* is used only when the variable is missing.
    """
    value = os.getenv(key)
    return default if value is None else value
# ── LLM ───────────────────────────────────────────────────────────────────────
# Primary provider (default: groq — fast + free tier)
LLM_PROVIDER: str = _get("LLM_PROVIDER", "groq").lower()
# Fallback provider used when primary fails (default: google)
LLM_FALLBACK_PROVIDER: str = _get("LLM_FALLBACK_PROVIDER", "google").lower()

# Per-provider credentials and model names.  Keys default to "" (unset);
# only the keys for the configured primary/fallback providers are needed.
# OpenAI
OPENAI_API_KEY: str = _get("OPENAI_API_KEY")
OPENAI_MODEL: str = _get("OPENAI_MODEL", "gpt-4o-mini")
# Anthropic
ANTHROPIC_API_KEY: str = _get("ANTHROPIC_API_KEY")
ANTHROPIC_MODEL: str = _get("ANTHROPIC_MODEL", "claude-3-haiku-20240307")
# Google Gemini (fallback)
GOOGLE_API_KEY: str = _get("GOOGLE_API_KEY")
GOOGLE_MODEL: str = _get("GOOGLE_MODEL", "gemini-2.0-flash-lite")
# Groq (primary)
GROQ_API_KEY: str = _get("GROQ_API_KEY")
GROQ_MODEL: str = _get("GROQ_MODEL", "llama-3.1-8b-instant")

# Token budgets — sum used as the single-call max_tokens in solver.py
# Increase if complex problems get truncated (Sudoku, Permutations, Graph etc.)
PHASE1_MAX_TOKENS: int = int(_get("PHASE1_MAX_TOKENS", "1000"))  # metadata + explanation + data_structure + workflow + tip_trick
PHASE2_MAX_TOKENS: int = int(_get("PHASE2_MAX_TOKENS", "1100"))  # python code
PHASE3_MAX_TOKENS: int = int(_get("PHASE3_MAX_TOKENS", "1500"))  # java + cpp
# BUG FIX: the default here was a hard-coded "2048", while the comments above
# state that the single-call budget is the combined phase total
# (1000 + 1100 + 1500 = 3600).  A 2048 cap silently truncated long answers.
# The default now tracks the phase budgets; an explicit LLM_MAX_TOKENS env
# var still overrides it.
LLM_MAX_TOKENS: int = int(
    _get(
        "LLM_MAX_TOKENS",
        str(PHASE1_MAX_TOKENS + PHASE2_MAX_TOKENS + PHASE3_MAX_TOKENS),
    )
)

# temperature=0 causes greedy decoding which can trigger repetition loops;
# 0.1 adds just enough randomness to break them while keeping output deterministic.
LLM_TEMPERATURE: float = float(_get("LLM_TEMPERATURE", "0.1"))
LLM_RETRY_COUNT: int = int(_get("LLM_RETRY_COUNT", "3"))
LLM_RETRY_DELAY: float = float(_get("LLM_RETRY_DELAY", "5"))      # seconds
LLM_REQUEST_DELAY: float = float(_get("LLM_REQUEST_DELAY", "1"))  # seconds
# ── Cache ─────────────────────────────────────────────────────────────────────
# File-based JSON cache avoids re-calling LLM for already-solved problems
# Only the literal string "true" (any case) enables the cache; any other
# value — including unset-to-empty — disables it.
LLM_CACHE_ENABLED: bool = _get("LLM_CACHE_ENABLED", "true").lower() == "true"
# Path resolved relative to this file's directory.  NOTE(review): pathlib's
# "/" operator means an *absolute* LLM_CACHE_FILE value replaces BASE_DIR
# entirely — confirm that is the intended override behavior.
LLM_CACHE_FILE: str = str(BASE_DIR / _get("LLM_CACHE_FILE", "llm_cache.json"))
# ── Database ──────────────────────────────────────────────────────────────────
DB_HOST: str = _get("DB_HOST", "localhost")
DB_PORT: int = int(_get("DB_PORT", "5432"))
DB_NAME: str = _get("DB_NAME", "striver_dsa")
DB_USER: str = _get("DB_USER", "postgres")
DB_PASSWORD: str = _get("DB_PASSWORD", "")
# BUG FIX: user and password are now percent-encoded before interpolation.
# Previously a password containing a reserved URL character (@, :, /, %)
# produced a malformed connection string that broke parsing downstream.
DATABASE_URL: str = (
    f"postgresql://{quote_plus(DB_USER)}:{quote_plus(DB_PASSWORD)}"
    f"@{DB_HOST}:{DB_PORT}/{DB_NAME}"
)
# ── Scraper ───────────────────────────────────────────────────────────────────
# Source page for the problem list; fixed, not env-configurable.
STRIVER_URL: str = (
    "https://takeuforward.org/interviews/strivers-sde-sheet-top-coding-interview-problems/"
)
# Headless unless SCRAPER_HEADLESS is set to anything other than "true".
SCRAPER_HEADLESS: bool = _get("SCRAPER_HEADLESS", "true").lower() == "true"
# Timeout presumably in seconds — TODO confirm against the scraper's usage.
SCRAPER_TIMEOUT: int = int(_get("SCRAPER_TIMEOUT", "30"))
SCRAPER_RETRY_COUNT: int = int(_get("SCRAPER_RETRY_COUNT", "3"))
# ── Output ────────────────────────────────────────────────────────────────────
# All output paths are anchored at this module's directory (BASE_DIR).
OUTPUT_EXCEL_PATH: str = str(BASE_DIR / _get("OUTPUT_EXCEL_PATH", "Striver_SDE_Auto_Solved.xlsx"))
LOG_FILE: str = str(BASE_DIR / _get("LOG_FILE", "log.txt"))
# Not env-configurable; presumably run-resume state — confirm against solver.
PROGRESS_FILE: str = str(BASE_DIR / "progress.json")
CACHE_FILE: str = LLM_CACHE_FILE  # alias for import convenience