/**
 * dry run: tests each LLM provider, mirroring the fallback chain in +server.ts.
 * reads keys from .env; never logs them.
 *
 * usage: node scripts/test-providers.mjs
 */

import { readFileSync } from 'fs';

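// minimal .env parser: one KEY=VALUE per line; blank lines and '#' comments are skipped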
const envFile = readFileSync('.env', 'utf-8');
const envVars = Object.fromEntries(
  envFile
    .split('\n')
    .filter((l) => l && !l.startsWith('#'))
    .map((l) => {
      const eq = l.indexOf('=');
      return eq > 0 ? [l.slice(0, eq).trim(), l.slice(eq + 1).trim()] : null;
    })
    .filter(Boolean)
);

const GEMINI_KEY = envVars.GEMINI_API_KEY;
const GROQ_KEY = envVars.GROQ_API_KEY;

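// tolerant JSON extraction: direct parse, then with markdown fences stripped, then the outermost {...} slice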
function extractJSON(raw) {
  const trimmed = raw.trim();
  try { return JSON.parse(trimmed); } catch {}
  const cleaned = trimmed.replace(/```json\n?|\n?```/g, '').trim();
  try { return JSON.parse(cleaned); } catch {}
  const s = cleaned.indexOf('{'), e = cleaned.lastIndexOf('}');
  if (s !== -1 && e > s) { try { return JSON.parse(cleaned.slice(s, e + 1)); } catch {} }
  return null;
}

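// tiny prompt used only to verify connectivity and auth for each provider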
const SMALL_PROMPT = 'Return ONLY valid JSON: {"test": true, "score": 85}';

// large resume prompt matching real usage (~14K chars, ~3.5K tokens at the ~4 chars/token estimate used below)
const BIG_RESUME = (
  'Experienced software engineer with expertise in distributed systems, cloud computing, and full-stack development. ' +
  'Built scalable microservices handling 10M+ requests per day using Go, Kubernetes, and AWS. Led team of 5 engineers. '
).repeat(60);
const BIG_PROMPT = `You are an ATS scoring engine. Analyze this resume against 6 ATS platforms (Workday, Taleo, iCIMS, Greenhouse, Lever, SuccessFactors). Return ONLY valid JSON with a "results" array containing objects with "system", "overallScore", and "passesFilter" fields. Resume: ${BIG_RESUME}`;

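// providers in the same order as the +server.ts fallback chain; each entry builds its request and pulls the raw text reply out of the response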
const PROVIDERS = [
  {
    name: 'gemma-3-27b (Google)',
    key: GEMINI_KEY,
    build: (prompt) => ({
      url: `https://generativelanguage.googleapis.com/v1beta/models/gemma-3-27b-it:generateContent?key=${GEMINI_KEY}`,
      opts: {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          contents: [{ parts: [{ text: prompt }] }],
          generationConfig: { temperature: 0.3, topP: 0.85, maxOutputTokens: 16384 }
        })
      }
    }),
    extract: (d) => d.candidates?.[0]?.content?.parts?.[0]?.text ?? ''
  },
  {
    name: 'llama-3.3-70b (Groq)',
    key: GROQ_KEY,
    build: (prompt) => ({
      url: 'https://api.groq.com/openai/v1/chat/completions',
      opts: {
        method: 'POST',
        headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${GROQ_KEY}` },
        body: JSON.stringify({
          model: 'llama-3.3-70b-versatile',
          messages: [{ role: 'user', content: prompt }],
          temperature: 0.3, top_p: 0.85, max_tokens: 16384,
          response_format: { type: 'json_object' }
        })
      }
    }),
    extract: (d) => d.choices?.[0]?.message?.content ?? ''
  },
  {
    name: 'gemini-2.5-flash (Google)',
    key: GEMINI_KEY,
    build: (prompt) => ({
      url: `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${GEMINI_KEY}`,
      opts: {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          contents: [{ parts: [{ text: prompt }] }],
          generationConfig: { temperature: 0.3, topP: 0.85, maxOutputTokens: 16384, responseMimeType: 'application/json' }
        })
      }
    }),
    extract: (d) => d.candidates?.[0]?.content?.parts?.[0]?.text ?? ''
  }
];

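// fires one request with a hard timeout; result shape: { status: 'OK' | 'SKIP' | 'HTTP_ERR' | 'EMPTY' | 'BAD_JSON' | 'TIMEOUT' | 'ERROR', ms, ...detail }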
async function callProvider(provider, prompt, timeoutMs = 30000) {
  if (!provider.key) return { status: 'SKIP', ms: 0, detail: 'no key' };

  const { url, opts } = provider.build(prompt);
  const t = performance.now();
  const ctrl = new AbortController();
  const timer = setTimeout(() => ctrl.abort(), timeoutMs);
  try {
    const res = await fetch(url, { ...opts, signal: ctrl.signal });
    const ms = Math.round(performance.now() - t);

    if (!res.ok) {
      const err = await res.text().catch(() => '');
      return { status: 'HTTP_ERR', ms, httpStatus: res.status, detail: err.slice(0, 150) };
    }

    const data = await res.json();
    const text = provider.extract(data);
    if (!text) return { status: 'EMPTY', ms };

    const parsed = extractJSON(text);
    if (!parsed || typeof parsed !== 'object') return { status: 'BAD_JSON', ms, detail: text.slice(0, 150) };

    return { status: 'OK', ms, keys: Object.keys(parsed).slice(0, 5) };
  } catch (err) {
    const ms = Math.round(performance.now() - t);
    const isTimeout = err.name === 'AbortError';
    return { status: isTimeout ? 'TIMEOUT' : 'ERROR', ms, detail: err.message };
  } finally {
    // always clear the timer, even when fetch throws, so a stray timeout doesn't keep the process alive
    clearTimeout(timer);
  }
}

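// compact per-provider result line: status tag, provider name, latency, then parsed keys or the error detail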
function log(name, r) {
  const tag = r.status === 'OK' ? 'OK' : r.status === 'SKIP' ? 'SKIP' : 'FAIL';
  const info = r.status === 'OK' ? `keys: [${r.keys}]` : (r.detail || r.httpStatus || '');
  console.log(`  ${tag.padEnd(4)} ${name.padEnd(28)} ${String(r.ms).padStart(5)}ms ${info}`);
}

console.log('=== test 1: small prompt (connectivity) ===\n');
for (const p of PROVIDERS) log(p.name, await callProvider(p, SMALL_PROMPT));

console.log('\n=== test 2: large prompt (realistic resume) ===\n');
console.log(`  prompt size: ${BIG_PROMPT.length} chars (~${Math.round(BIG_PROMPT.length / 4)} tokens)\n`);
for (const p of PROVIDERS) log(p.name, await callProvider(p, BIG_PROMPT, 45000));

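// walk the chain in order and stop at the first provider that returns usable JSON, mirroring the +server.ts fallback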
console.log('\n=== test 3: fallback chain simulation ===\n');
let resolved = false;
for (const p of PROVIDERS) {
  const r = await callProvider(p, BIG_PROMPT, 45000);
  if (r.status === 'OK') {
    console.log(`  resolved: ${p.name} (${r.ms}ms)`);
    resolved = true;
    break;
  }
  console.log(`  ${p.name}: ${r.status} (${r.ms}ms) → next`);
}
if (!resolved) console.log('  ALL FAILED → 503');

console.log('\n=== done ===');