Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
280 changes: 278 additions & 2 deletions containers/api-proxy/server.js
Original file line number Diff line number Diff line change
Expand Up @@ -1145,6 +1145,273 @@
});
}

/**
 * Issue an HTTP(S) request (routed through the egress proxy agent for HTTPS
 * when one is configured) and resolve with the parsed JSON response body.
 *
 * Never rejects: resolves with null on an invalid URL, transport error,
 * timeout, non-2xx status, connection drop mid-body, or unparseable JSON.
 *
 * @param {string} url - Absolute URL to request
 * @param {{ method: string, headers: Record<string,string> }} opts - Method and headers
 * @param {number} timeoutMs - Socket timeout in milliseconds
 * @returns {Promise<object|null>} Parsed JSON body, or null on any failure
 */
function fetchJson(url, opts, timeoutMs) {
  return new Promise((resolve) => {
    let target;
    try {
      target = new URL(url);
    } catch {
      resolve(null);
      return;
    }

    const secure = target.protocol === 'https:';
    const transport = secure ? https : http;

    // Settle the promise at most once, no matter how many events fire.
    let done = false;
    const finish = (value) => {
      if (!done) {
        done = true;
        resolve(value);
      }
    };

    const requestOptions = {
      hostname: target.hostname,
      port: target.port || (secure ? 443 : 80),
      path: target.pathname + target.search,
      method: opts.method,
      headers: { ...opts.headers },
      timeout: timeoutMs,
    };
    // Outbound HTTPS goes through the proxy agent when one is configured.
    if (secure && proxyAgent) {
      requestOptions.agent = proxyAgent;
    }

    const req = transport.request(requestOptions, (res) => {
      const ok = res.statusCode >= 200 && res.statusCode < 300;
      if (!ok) {
        res.resume(); // drain the body so the socket can be reused
        finish(null);
        return;
      }
      const body = [];
      res.on('data', (piece) => {
        body.push(piece);
      });
      res.on('end', () => {
        let json = null;
        try {
          json = JSON.parse(Buffer.concat(body).toString());
        } catch {
          // malformed JSON falls through as null
        }
        finish(json);
      });
      res.on('error', (err) => {
        logRequest('debug', 'fetch_json_error', { url: sanitizeForLog(url), error: String(err && err.message ? err.message : err) });
        finish(null);
      });
      // Guard against connection drops mid-body that never emit 'end' or 'error'
      res.on('close', () => finish(null));
    });

    req.on('timeout', () => {
      const err = new Error(`fetchJson timed out after ${timeoutMs}ms`);
      logRequest('debug', 'fetch_json_timeout', { url: sanitizeForLog(url), timeout_ms: timeoutMs });
      // destroy() surfaces the error through the 'error' handler below, which settles.
      req.destroy(err);
    });
    req.on('error', (err) => {
      logRequest('debug', 'fetch_json_error', { url: sanitizeForLog(url), error: String(err && err.message ? err.message : err) });
      finish(null);
    });
    req.end();
  });
}

/**
 * Prefix used by the Gemini models API in model name fields.
 * Example: { name: "models/gemini-1.5-pro" } → "gemini-1.5-pro"
 */
const GEMINI_MODEL_NAME_PREFIX = 'models/';

/**
 * Extract model IDs from a provider API response.
 * Handles:
 * - OpenAI / Anthropic / Copilot format: { data: [{ id }, ...] }
 * - Gemini format: { models: [{ name: "models/gemini-1.5-pro" }, ...] }
 *
 * @param {object|null} json - Parsed API response
 * @returns {string[]|null} Sorted array of model IDs, or null if unavailable
 */
function extractModelIds(json) {
  if (!json || typeof json !== 'object') return null;

  let raw = null;

  if (Array.isArray(json.data)) {
    // OpenAI / Anthropic / Copilot: prefer `id`, fall back to `name`.
    raw = json.data.map((m) => (m ? m.id || m.name : null));
  } else if (Array.isArray(json.models)) {
    // Gemini: strip the "models/" prefix when present.
    raw = json.models.map((m) => {
      const name = m && m.name;
      if (!name) return null;
      return name.startsWith(GEMINI_MODEL_NAME_PREFIX)
        ? name.slice(GEMINI_MODEL_NAME_PREFIX.length)
        : name;
    });
  }

  if (raw === null) return null;
  const ids = raw.filter(Boolean);
  return ids.length > 0 ? ids.sort() : null;
}

/**
 * Per-provider cache of available model IDs, populated at startup by
 * fetchStartupModels(). A key mapped to null means the fetch for that
 * provider failed; a missing key means it was never attempted.
 * @type {Record<string, string[]|null>}
 */
const cachedModels = {};

/** Flipped to true once fetchStartupModels() has finished (success or failure). */
let modelFetchComplete = false;

/** Clear all cached model data and the completion flag (test helper). */
function resetModelCacheState() {
  Object.keys(cachedModels).forEach((key) => {
    delete cachedModels[key];
  });
  modelFetchComplete = false;
}

/**
* Fetch available models for each configured provider and cache them.
* Called at startup alongside key validation.
*
* Accepts the same override map as validateApiKeys() so tests can inject
* custom keys and targets without touching process.env.
*
* @param {object} [overrides={}] - Optional key/target overrides (used in tests)
*/
async function fetchStartupModels(overrides = {}) {
const ov = (key, fallback) => key in overrides ? overrides[key] : fallback;
const openaiKey = ov('openaiKey', OPENAI_API_KEY);
const openaiTarget = ov('openaiTarget', OPENAI_API_TARGET);
const anthropicKey = ov('anthropicKey', ANTHROPIC_API_KEY);
const anthropicTarget = ov('anthropicTarget', ANTHROPIC_API_TARGET);
const copilotGithubToken = ov('copilotGithubToken', COPILOT_GITHUB_TOKEN);
const copilotAuthToken = ov('copilotAuthToken', COPILOT_AUTH_TOKEN);

Check notice

Code scanning / CodeQL

Unused variable, import, function or class Note

Unused variable copilotAuthToken.
const copilotTarget = ov('copilotTarget', COPILOT_API_TARGET);
const copilotIntegrationId = ov('copilotIntegrationId', COPILOT_INTEGRATION_ID);
const geminiKey = ov('geminiKey', GEMINI_API_KEY);
const geminiTarget = ov('geminiTarget', GEMINI_API_TARGET);
const TIMEOUT_MS = ov('timeoutMs', 10_000);

const fetches = [];

if (openaiKey) {
fetches.push(
fetchJson(`https://${openaiTarget}/v1/models`, {
method: 'GET',
headers: { 'Authorization': `Bearer ${openaiKey}` },
}, TIMEOUT_MS).then((json) => {
cachedModels.openai = extractModelIds(json);
})
);
}

if (anthropicKey) {
fetches.push(
fetchJson(`https://${anthropicTarget}/v1/models`, {
method: 'GET',
headers: { 'x-api-key': anthropicKey, 'anthropic-version': '2023-06-01' },
}, TIMEOUT_MS).then((json) => {
cachedModels.anthropic = extractModelIds(json);
})
);
}

// Only use COPILOT_GITHUB_TOKEN (GitHub OAuth) for /models — COPILOT_API_KEY (BYOK) is not
// accepted by the Copilot /models endpoint (consistent with validateApiKeys behaviour).
if (copilotGithubToken) {
fetches.push(
fetchJson(`https://${copilotTarget}/models`, {
method: 'GET',
headers: {
'Authorization': `Bearer ${copilotGithubToken}`,
'Copilot-Integration-Id': copilotIntegrationId,
},
}, TIMEOUT_MS).then((json) => {
cachedModels.copilot = extractModelIds(json);
})
);
}

if (geminiKey) {
fetches.push(
fetchJson(`https://${geminiTarget}/v1beta/models`, {
method: 'GET',
headers: { 'x-goog-api-key': geminiKey },
}, TIMEOUT_MS).then((json) => {
cachedModels.gemini = extractModelIds(json);
})
);
}

await Promise.allSettled(fetches);
modelFetchComplete = true;
}

/**
 * Build the reflection response describing all proxy endpoints and their available models.
 *
 * The reflection endpoint allows agent harnesses to dynamically discover which
 * LLM providers are configured and what models are available, enabling intelligent
 * provider and model selection based on the task at hand.
 *
 * @returns {{ endpoints: Array<object>, models_fetch_complete: boolean }}
 */
function reflectEndpoints() {
  // One endpoint descriptor. A null modelsPath means the provider exposes no
  // direct model-listing URL.
  const describe = (provider, port, configured, models, modelsPath) => ({
    provider,
    port,
    base_url: `http://api-proxy:${port}`,
    configured,
    models: models || null,
    models_url: modelsPath === null ? null : `http://api-proxy:${port}${modelsPath}`,
  });

  const opencodeConfigured = !!(OPENAI_API_KEY || ANTHROPIC_API_KEY || COPILOT_AUTH_TOKEN);

  return {
    endpoints: [
      describe('openai', 10000, !!OPENAI_API_KEY, cachedModels.openai, '/v1/models'),
      describe('anthropic', 10001, !!ANTHROPIC_API_KEY, cachedModels.anthropic, '/v1/models'),
      describe('copilot', 10002, !!COPILOT_AUTH_TOKEN, cachedModels.copilot, '/models'),
      describe('gemini', 10003, !!GEMINI_API_KEY, cachedModels.gemini, '/v1beta/models'),
      // OpenCode routes to one of the above providers; query them directly for models.
      describe('opencode', 10004, opencodeConfigured, null, null),
    ],
    models_fetch_complete: modelFetchComplete,
  };
}

function healthResponse() {
return {
status: 'healthy',
Expand All @@ -1166,7 +1433,7 @@
}

/**
* Handle management endpoints on port 10000 (/health, /metrics).
* Handle management endpoints on port 10000 (/health, /metrics, /reflect).
* Returns true if the request was handled, false otherwise.
*/
function handleManagementEndpoint(req, res) {
Expand All @@ -1180,6 +1447,11 @@
res.end(JSON.stringify(metrics.getMetrics()));
return true;
}
if (req.method === 'GET' && req.url === '/reflect') {
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify(reflectEndpoints()));
return true;
}
return false;
}

Expand All @@ -1205,6 +1477,10 @@
logRequest('error', 'key_validation_error', { message: 'Unexpected error during key validation', error: String(err) });
keyValidationComplete = true;
});
fetchStartupModels().catch((err) => {
logRequest('error', 'model_fetch_error', { message: 'Unexpected error fetching startup models', error: String(err) });
modelFetchComplete = true;
});
}
}

Expand Down Expand Up @@ -1506,4 +1782,4 @@
}

// Export for testing
module.exports = { normalizeApiTarget, deriveCopilotApiTarget, deriveGitHubApiTarget, deriveGitHubApiBasePath, normalizeBasePath, buildUpstreamPath, proxyWebSocket, resolveCopilotAuthToken, resolveOpenCodeRoute, shouldStripHeader, stripGeminiKeyParam, validateApiKeys, probeProvider, httpProbe, keyValidationResults, resetKeyValidationState };
module.exports = { normalizeApiTarget, deriveCopilotApiTarget, deriveGitHubApiTarget, deriveGitHubApiBasePath, normalizeBasePath, buildUpstreamPath, proxyWebSocket, resolveCopilotAuthToken, resolveOpenCodeRoute, shouldStripHeader, stripGeminiKeyParam, validateApiKeys, probeProvider, httpProbe, keyValidationResults, resetKeyValidationState, fetchJson, extractModelIds, fetchStartupModels, reflectEndpoints, cachedModels, resetModelCacheState };
Loading
Loading