diff --git a/mcpjam-inspector/client/src/App.tsx b/mcpjam-inspector/client/src/App.tsx index 246850fbf..8d3018049 100644 --- a/mcpjam-inspector/client/src/App.tsx +++ b/mcpjam-inspector/client/src/App.tsx @@ -101,6 +101,7 @@ import { getChatboxPathTokenFromLocation, } from "./components/hosted/ChatboxChatPage"; import { useHostedApiContext } from "./hooks/hosted/use-hosted-api-context"; +import { useLocalStateMigration } from "./hooks/use-local-state-migration"; import { AppReadyProvider } from "./hooks/use-app-ready"; import { useInspectorCommandBus } from "./hooks/use-inspector-command-bus"; import { HOSTED_MODE, NON_PROD_LOCKDOWN } from "./lib/config"; @@ -802,6 +803,13 @@ export default function App() { : undefined, }); useInspectorCommandBus(); + // One-time migration from legacy localStorage state to Convex. No-op in + // hosted mode and after the first successful run; safe to keep in the tree. + useLocalStateMigration({ + isAuthenticated, + isUserBootstrapping: isEnsuringUser, + organizationId: activeOrganizationId, + }); const oauthDebuggerServersRef = useRef(appState.servers); oauthDebuggerServersRef.current = appState.servers; const projectServersRef = useRef(projectServers); diff --git a/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.hosted-oauth.test.tsx b/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.hosted-oauth.test.tsx index 813bc76b9..07456b001 100644 --- a/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.hosted-oauth.test.tsx +++ b/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.hosted-oauth.test.tsx @@ -63,6 +63,8 @@ vi.mock("@/lib/oauth/mcp-oauth", () => ({ vi.mock("@/lib/apis/web/context", () => ({ injectHostedServerMapping: vi.fn(), + tryGetHostedServerDisplayName: vi.fn(), + tryResolveProjectServer: vi.fn(() => null), })); vi.mock("@/lib/session-token", () => ({ diff --git a/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.test.tsx 
b/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.test.tsx index 74d1db292..11d8316b7 100644 --- a/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.test.tsx +++ b/mcpjam-inspector/client/src/hooks/__tests__/use-server-state.test.tsx @@ -72,6 +72,8 @@ vi.mock("@/lib/oauth/mcp-oauth", () => ({ vi.mock("@/lib/apis/web/context", () => ({ injectHostedServerMapping: vi.fn(), + tryGetHostedServerDisplayName: vi.fn(), + tryResolveProjectServer: vi.fn(() => null), })); vi.mock("@/lib/session-token", () => ({ diff --git a/mcpjam-inspector/client/src/hooks/hosted/use-hosted-api-context.ts b/mcpjam-inspector/client/src/hooks/hosted/use-hosted-api-context.ts index 5a0748801..bfd098bd0 100644 --- a/mcpjam-inspector/client/src/hooks/hosted/use-hosted-api-context.ts +++ b/mcpjam-inspector/client/src/hooks/hosted/use-hosted-api-context.ts @@ -1,5 +1,4 @@ import { useLayoutEffect } from "react"; -import { HOSTED_MODE } from "@/lib/config"; import { setHostedApiContext } from "@/lib/apis/web/context"; interface UseHostedApiContextOptions { @@ -35,11 +34,6 @@ export function useHostedApiContext({ // between this effect's cleanup (which nulls the context) and its setup, // causing "Hosted server not found" errors for shared-chat OAuth servers. 
useLayoutEffect(() => { - if (!HOSTED_MODE) { - setHostedApiContext(null); - return; - } - if (!enabled) { return; } diff --git a/mcpjam-inspector/client/src/hooks/use-local-state-migration.ts b/mcpjam-inspector/client/src/hooks/use-local-state-migration.ts new file mode 100644 index 000000000..de902eed8 --- /dev/null +++ b/mcpjam-inspector/client/src/hooks/use-local-state-migration.ts @@ -0,0 +1,268 @@ +import { useEffect, useRef, useState } from "react"; +import { useMutation } from "convex/react"; +import { + hasMigrationCompleted, + runLocalStateMigration, +} from "@/lib/local-state-migration"; +import { HOSTED_MODE } from "@/lib/config"; +import { useLogger } from "./use-logger"; + +/** + * localStorage key holding a short-lived lease (timestamp ms) so two + * inspector tabs opened before either marks the migration complete don't + * both call `projects:createProject` for the same legacy project. The lease + * is bounded by `MIGRATION_LEASE_TTL_MS` so a tab that crashed mid-migration + * doesn't block another tab forever. + */ +const MIGRATION_LEASE_KEY = "mcp-inspector-migration-lease"; +const MIGRATION_LEASE_TTL_MS = 5 * 60 * 1000; + +/** + * Backoff used when a migration attempt returns `ok: false` (e.g., Convex + * was briefly unreachable, or one project failed) or the cross-tab lease + * is currently held. Without an explicit retry trigger the `useEffect` + * dependencies are stable and the failed migration would effectively wait + * for a page reload. + */ +const RETRY_DELAY_MS = 30 * 1000; + +/** + * Hard cap on retry attempts so a permanent error (billing limit reached, + * forbidden, validation) doesn't spam the console every 30s forever. After + * this many retries the hook stops scheduling new ones; the user can still + * trigger a fresh attempt by reloading the tab. + */ +const MAX_RETRY_ATTEMPTS = 3; + +/** + * Convex error codes that the migration cannot recover from by retrying. 
+ * If any per-project failure carries one of these codes, the hook treats + * the whole batch as terminal — no retry, no log spam. The user reloads + * (or fixes the underlying account state) to try again. + */ +const PERMANENT_ERROR_CODES = new Set([ + "billing_limit_reached", + "FORBIDDEN", + "UNAUTHORIZED", + "VALIDATION_ERROR", +]); + +function looksPermanent(errorMessage: string): boolean { + for (const code of PERMANENT_ERROR_CODES) { + if (errorMessage.includes(code)) return true; + } + return false; +} + +interface UseLocalStateMigrationOptions { + /** True when Convex auth has resolved (signed-in user OR guest). */ + isAuthenticated: boolean; + /** + * True while `users:ensureUser` is still running. The migration depends + * on the actor's `users` row + default org existing in Convex; firing + * before bootstrap can race the row creation and surface as a + * `createProject` failure with no automatic retry. Wait until this is + * false before attempting migration. + */ + isUserBootstrapping: boolean; + /** + * Optional org id to migrate into. Undefined defers to Convex's + * resolveProjectOrganizationId which falls back to the actor's default + * organization (provisioned by `users:ensureUser`). + */ + organizationId?: string; +} + +function tryAcquireMigrationLease(): boolean { + if (typeof window === "undefined") return false; + try { + const now = Date.now(); + const existing = localStorage.getItem(MIGRATION_LEASE_KEY); + if (existing) { + const ts = Number(existing); + if (Number.isFinite(ts) && now - ts < MIGRATION_LEASE_TTL_MS) { + return false; + } + } + localStorage.setItem(MIGRATION_LEASE_KEY, String(now)); + return true; + } catch { + // localStorage blocked — caller falls back to in-memory ref guard. 
+ return true; + } +} + +function releaseMigrationLease(): void { + if (typeof window === "undefined") return; + try { + localStorage.removeItem(MIGRATION_LEASE_KEY); + } catch { + // best-effort + } +} + +/** + * Runs the legacy-localStorage → Convex migration exactly once per install. + * + * Skips when: + * - HOSTED_MODE (hosted users never had localStorage state) + * - The migration flag is already set + * - Convex auth hasn't resolved + * - User bootstrap (`users:ensureUser`) is still running + * - Migration is already in flight (in-tab) or another tab holds the lease + * + * On `ok: false` (partial failure) or contention with another tab, + * schedules a retry tick after `RETRY_DELAY_MS`. Because `useMutation` + * returns a stable reference and the gate inputs rarely change, the prior + * "retry on next render" approach effectively waited for a page reload — + * the explicit tick state forces the effect to re-evaluate. + */ +export function useLocalStateMigration({ + isAuthenticated, + isUserBootstrapping, + organizationId, +}: UseLocalStateMigrationOptions): void { + const logger = useLogger("LocalStateMigration"); + const inFlightRef = useRef(false); + const doneRef = useRef(false); + const retryTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null); + const retryCountRef = useRef(0); + const [retryTick, setRetryTick] = useState(0); + const createProject = useMutation("projects:createProject" as any); + + useEffect(() => { + // Per-effect-run cancel flag. The migration is async — the component can + // unmount (or the effect can re-run on dep change) while the promise is + // still in flight. Without this flag, the trailing `.then/.catch/.finally` + // would call `scheduleRetry()` after unmount, scheduling a `setTimeout` + // that the cleanup function has already had its chance to clear. Result: + // an orphaned 30s timer that fires after the component is gone and calls + // `setRetryTick` on a dead instance.
+ let cancelled = false; + + // Defined inside the effect so it closes over the same `logger` reference + // the effect itself uses. Hoisting it to the component body would let the + // async .then/.catch callbacks reach a stale `logger` if `useLogger` + // returns a new object on a subsequent render. + const scheduleRetry = (): void => { + if (cancelled) return; + if (retryTimerRef.current !== null) return; + if (retryCountRef.current >= MAX_RETRY_ATTEMPTS) { + logger.warn( + "Local state migration retry limit reached; will not retry until reload", + { attempts: retryCountRef.current }, + ); + doneRef.current = true; + return; + } + retryCountRef.current += 1; + retryTimerRef.current = setTimeout(() => { + retryTimerRef.current = null; + setRetryTick((t) => t + 1); + }, RETRY_DELAY_MS); + }; + + if (HOSTED_MODE) return; + if (!isAuthenticated) return; + if (isUserBootstrapping) return; + if (doneRef.current) return; + if (inFlightRef.current) return; + if (hasMigrationCompleted()) { + doneRef.current = true; + return; + } + + if (!tryAcquireMigrationLease()) { + logger.info("Another tab holds the migration lease; will retry", { + retryDelayMs: RETRY_DELAY_MS, + }); + scheduleRetry(); + return () => { + cancelled = true; + }; + } + + inFlightRef.current = true; + runLocalStateMigration({ + createProject: createProject as any, + organizationId, + logger, + }) + .then((result) => { + if (cancelled) return; + if (result.ok) { + doneRef.current = true; + if (result.projectsMigrated > 0) { + logger.info("Local state migration completed", { + projectsMigrated: result.projectsMigrated, + }); + } + return; + } + // Permanent backend errors (billing limit, forbidden, validation) + // won't recover by retrying. Mark done so the user can fix the + // account state and reload, instead of retrying every 30s. 
+ const hasPermanentError = result.errors.some((e) => + looksPermanent(e.error), + ); + if (hasPermanentError) { + logger.error( + "Local state migration hit a permanent error; not retrying", + { errors: result.errors }, + ); + doneRef.current = true; + return; + } + logger.warn( + "Local state migration partially failed; scheduling retry", + { + errors: result.errors, + retryDelayMs: RETRY_DELAY_MS, + attempt: retryCountRef.current + 1, + }, + ); + scheduleRetry(); + }) + .catch((error) => { + if (cancelled) return; + const message = error instanceof Error ? error.message : String(error); + if (looksPermanent(message)) { + logger.error( + "Local state migration threw a permanent error; not retrying", + { error: message }, + ); + doneRef.current = true; + return; + } + logger.error("Local state migration threw; scheduling retry", { + error: message, + retryDelayMs: RETRY_DELAY_MS, + attempt: retryCountRef.current + 1, + }); + scheduleRetry(); + }) + .finally(() => { + inFlightRef.current = false; + releaseMigrationLease(); + }); + + return () => { + // Block any trailing async resolutions from scheduling new work, and + // clear the retry timer if one was already scheduled. The other refs + // (`inFlightRef`, `doneRef`) intentionally persist so a re-mount or a + // dep-change re-run picks up where we left off. 
+ cancelled = true; + if (retryTimerRef.current !== null) { + clearTimeout(retryTimerRef.current); + retryTimerRef.current = null; + } + }; + }, [ + isAuthenticated, + isUserBootstrapping, + organizationId, + createProject, + logger, + retryTick, + ]); +} diff --git a/mcpjam-inspector/client/src/hooks/use-server-state.ts b/mcpjam-inspector/client/src/hooks/use-server-state.ts index d280d656a..329d1db5c 100644 --- a/mcpjam-inspector/client/src/hooks/use-server-state.ts +++ b/mcpjam-inspector/client/src/hooks/use-server-state.ts @@ -39,6 +39,7 @@ import { HOSTED_MODE } from "@/lib/config"; import { injectHostedServerMapping, tryGetHostedServerDisplayName, + tryResolveProjectServer, } from "@/lib/apis/web/context"; import type { OAuthTestProfile } from "@/lib/oauth/profile"; import { authFetch } from "@/lib/session-token"; @@ -730,20 +731,74 @@ export function useServerState({ return true; }, [isClientConfigSyncPending]); + // Extract runtime overlay applied by `withProjectConnectionDefaults` so the + // resolver path can reproduce them server-side. Without this, the resolver + // sees only the Convex-stored per-server config and loses project-level + // header overlays / timeout / capabilities. HTTP servers' Authorization is + // omitted here — OAuth bearer is reattached server-side from the Convex + // token store. 
+ const buildResolverConnectionDefaults = useCallback( + (serverConfig: MCPServerConfig) => { + const defaults: { + headers?: Record<string, string>; + timeoutMs?: number; + clientCapabilities?: Record<string, unknown>; + } = {}; + if ("url" in serverConfig) { + const headers = omitAuthorizationHeader( + extractRequestHeaders(serverConfig.requestInit) + ); + if (headers && Object.keys(headers).length > 0) { + defaults.headers = headers; + } + } + if (typeof serverConfig.timeout === "number") { + defaults.timeoutMs = serverConfig.timeout; + } + const caps = serverConfig.clientCapabilities as + | Record<string, unknown> + | undefined; + if (caps && typeof caps === "object") defaults.clientCapabilities = caps; + return Object.keys(defaults).length > 0 ? defaults : undefined; + }, + [] + ); + const guardedTestConnection = useCallback( async (serverConfig: MCPServerConfig, serverName: string) => { assertClientConfigSynced(); + // Opt into the resolver path when both projectId and a Convex serverId + // are populated in the API context; otherwise fall back to legacy + // {serverConfig, serverId} so brand-new servers (not yet synced to + // Convex) keep working. The 2-arg call signature is preserved when no + // resolver context is available so existing test mocks keep matching.
+ const resolved = tryResolveProjectServer(serverName); + if (resolved) { + return testConnection(serverConfig, resolved.serverId, { + projectId: resolved.projectId, + serverName, + connectionDefaults: buildResolverConnectionDefaults(serverConfig), + }); + } return testConnection(serverConfig, serverName); }, - [assertClientConfigSynced] + [assertClientConfigSynced, buildResolverConnectionDefaults] ); const guardedReconnectServer = useCallback( async (serverName: string, serverConfig: MCPServerConfig) => { assertClientConfigSynced(); + const resolved = tryResolveProjectServer(serverName); + if (resolved) { + return reconnectServer(resolved.serverId, serverConfig, { + projectId: resolved.projectId, + serverName, + connectionDefaults: buildResolverConnectionDefaults(serverConfig), + }); + } return reconnectServer(serverName, serverConfig); }, - [assertClientConfigSynced] + [assertClientConfigSynced, buildResolverConnectionDefaults] ); const validateForm = (formData: ServerFormData): string | null => { @@ -1683,54 +1738,45 @@ export function useServerState({ oauthFlowProfile: formOAuthProfile, hasClientSecret: nextHasClientSecret, }; - if (HOSTED_MODE) { - let syncErr: unknown; - try { - const serverId = await syncServerToConvex( - formData.name, - serverEntryForSave, - clientSecretSyncOptions - ); - if (serverId) { - hostedServerId = serverId; - injectHostedServerMapping(formData.name, serverId); - } - } catch (err) { - syncErr = err; - logger.warn("Sync to Convex failed (pre-connection)", { - serverName: formData.name, - err, - }); - } - // OAuth in hosted mode requires a Convex serverId to bind credentials - // to. Without it, prepareHostedProjectOAuthRedirect is a no-op and the - // local fallback can't persist tokens either (mcp-oauth.saveTokens is - // gated on !HOSTED_MODE), so the OAuth dance would complete without a - // durable credential. Fail loudly instead. - if (formData.useOAuth && !hostedServerId) { - const errorMessage = - syncErr instanceof Error - ? 
`Could not save the hosted server before starting OAuth: ${syncErr.message}` - : "Could not save the hosted server before starting OAuth. Please try again."; - dispatch({ - type: "CONNECT_FAILURE", - name: formData.name, - error: errorMessage, - }); - toast.error(errorMessage); - return; - } - } else { - syncServerToConvex( + // Both modes: await Convex sync so the returned serverId is available + // for OAuth binding (hosted) and for the new {projectId, serverId} + // request shape (local mode resolver path). Failure is non-fatal in + // local mode — the legacy {serverConfig, serverId: name} body still + // works as a fallback. + let syncErr: unknown; + try { + const serverId = await syncServerToConvex( formData.name, serverEntryForSave, clientSecretSyncOptions - ).catch((err) => - logger.warn("Background sync to Convex failed (pre-connection)", { - serverName: formData.name, - err, - }) ); + if (serverId) { + hostedServerId = serverId; + injectHostedServerMapping(formData.name, serverId); + } + } catch (err) { + syncErr = err; + logger.warn("Sync to Convex failed (pre-connection)", { + serverName: formData.name, + err, + }); + } + if (HOSTED_MODE && formData.useOAuth && !hostedServerId) { + // OAuth in hosted mode requires a Convex serverId to bind credentials + // to; without it the OAuth dance would complete without a durable + // credential. Local-mode OAuth follows the same constraint post- + // unification but the legacy localStorage fallback still catches it. + const errorMessage = + syncErr instanceof Error + ? `Could not save the hosted server before starting OAuth: ${syncErr.message}` + : "Could not save the hosted server before starting OAuth. 
Please try again."; + dispatch({ + type: "CONNECT_FAILURE", + name: formData.name, + error: errorMessage, + }); + toast.error(errorMessage); + return; } if (!isAuthenticated) { const project = appState.projects[appState.activeProjectId]; diff --git a/mcpjam-inspector/client/src/lib/__tests__/local-state-migration.test.ts b/mcpjam-inspector/client/src/lib/__tests__/local-state-migration.test.ts new file mode 100644 index 000000000..a296369cb --- /dev/null +++ b/mcpjam-inspector/client/src/lib/__tests__/local-state-migration.test.ts @@ -0,0 +1,410 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + MIGRATION_FLAG_KEY, + MIGRATION_PROGRESS_KEY, + hasMigrationCompleted, + readLegacyMigrationPayload, + runLocalStateMigration, +} from "../local-state-migration"; + +function seedLegacyProjects() { + localStorage.setItem( + "mcp-inspector-projects", + JSON.stringify({ + activeProjectId: "proj-a", + projects: { + "proj-a": { + id: "proj-a", + name: "Default", + description: "default", + createdAt: 1700000000000, + updatedAt: 1700000000000, + isDefault: true, + servers: { + stdio_one: { + name: "stdio_one", + enabled: true, + useOAuth: false, + config: { + command: "node", + args: ["server.js"], + env: { FOO: "from-config" }, + }, + connectionStatus: "disconnected", + retryCount: 0, + lastConnectionTime: 1700000000000, + }, + http_one: { + name: "http_one", + enabled: true, + useOAuth: false, + config: { + url: "http://localhost:3000/mcp", + requestInit: { headers: { "X-Test": "1" } }, + }, + connectionStatus: "disconnected", + retryCount: 0, + lastConnectionTime: 1700000000000, + }, + }, + }, + }, + }) + ); +} + +describe("local-state-migration", () => { + beforeEach(() => { + localStorage.clear(); + }); + + afterEach(() => { + localStorage.clear(); + }); + + describe("hasMigrationCompleted", () => { + it("returns false when flag is unset", () => { + expect(hasMigrationCompleted()).toBe(false); + }); + + it("returns true when flag is 
set", () => { + localStorage.setItem(MIGRATION_FLAG_KEY, "1"); + expect(hasMigrationCompleted()).toBe(true); + }); + }); + + describe("readLegacyMigrationPayload", () => { + it("returns null when no legacy data exists", () => { + expect(readLegacyMigrationPayload()).toBeNull(); + }); + + it("parses projects + servers", () => { + seedLegacyProjects(); + const payload = readLegacyMigrationPayload(); + expect(payload).not.toBeNull(); + expect(payload!.projects).toHaveLength(1); + expect(payload!.projects[0].name).toBe("Default"); + expect(Object.keys(payload!.projects[0].servers)).toEqual( + expect.arrayContaining(["stdio_one", "http_one"]) + ); + }); + + it("captures mcp-env-${name} into envByName", () => { + seedLegacyProjects(); + localStorage.setItem( + "mcp-env-stdio_one", + JSON.stringify({ FOO: "from-env-key", BAR: "extra" }) + ); + const payload = readLegacyMigrationPayload(); + expect(payload!.envByName.stdio_one).toEqual({ + FOO: "from-env-key", + BAR: "extra", + }); + }); + + it("survives malformed mcp-env entries", () => { + seedLegacyProjects(); + localStorage.setItem("mcp-env-stdio_one", "not-json"); + const payload = readLegacyMigrationPayload(); + // mcp-env-stdio_one was unparseable; envByName should not have it, + // but the projects should still parse. + expect(payload!.envByName.stdio_one).toBeUndefined(); + expect(payload!.projects).toHaveLength(1); + }); + + it("falls back to mcp-inspector-state when no projects/workspaces present", () => { + // Pre-projects format: servers live at the top level of `mcp-inspector-state`. + // Without this fallback, users on this format would have the migration + // mark itself complete with nothing pushed to Convex, then later have + // their state cleared. 
+ localStorage.setItem( + "mcp-inspector-state", + JSON.stringify({ + servers: { + legacy_stdio: { + name: "legacy_stdio", + enabled: true, + useOAuth: false, + config: { + command: "python", + args: ["-m", "server"], + }, + connectionStatus: "disconnected", + retryCount: 0, + lastConnectionTime: 1700000000000, + }, + }, + }) + ); + const payload = readLegacyMigrationPayload(); + expect(payload).not.toBeNull(); + expect(payload!.projects).toHaveLength(1); + expect(payload!.projects[0].isDefault).toBe(true); + expect(Object.keys(payload!.projects[0].servers)).toContain("legacy_stdio"); + }); + + it("ignores mcp-inspector-state when projects already exist", () => { + seedLegacyProjects(); + localStorage.setItem( + "mcp-inspector-state", + JSON.stringify({ servers: { stale: { name: "stale", config: {} } } }) + ); + const payload = readLegacyMigrationPayload(); + // Projects-format wins; STATE servers are not lifted. + expect(payload!.projects).toHaveLength(1); + expect(Object.keys(payload!.projects[0].servers)).not.toContain("stale"); + }); + + it("treats unreadable projects store as terminal even when STATE is valid", async () => { + // If `mcp-inspector-projects` is corrupt but `mcp-inspector-state` has + // a valid pre-projects payload, we must NOT silently migrate the STATE + // data and then clear all legacy keys — that would delete the corrupt + // projects store along with the rest, losing data a future parser fix + // could have recovered. Surface as unreadable so the runner records + // the failure and leaves every legacy key in place. 
+ localStorage.setItem("mcp-inspector-projects", "{not valid json"); + localStorage.setItem( + "mcp-inspector-state", + JSON.stringify({ + servers: { + legacy_stdio: { + name: "legacy_stdio", + enabled: true, + useOAuth: false, + config: { command: "python", args: ["-m", "server"] }, + connectionStatus: "disconnected", + retryCount: 0, + lastConnectionTime: 1700000000000, + }, + }, + }) + ); + + // The convenience wrapper coerces unreadable to null. + expect(readLegacyMigrationPayload()).toBeNull(); + + const createProject = vi.fn().mockResolvedValue("convex-id"); + const result = await runLocalStateMigration({ + createProject: createProject as any, + }); + expect(result.ok).toBe(false); + expect(createProject).not.toHaveBeenCalled(); + // All legacy keys preserved for a future retry / parser fix. + expect(localStorage.getItem("mcp-inspector-projects")).toBe( + "{not valid json" + ); + expect(localStorage.getItem("mcp-inspector-state")).not.toBeNull(); + expect(hasMigrationCompleted()).toBe(false); + }); + + it("returns null when STATE has no servers field", () => { + localStorage.setItem( + "mcp-inspector-state", + JSON.stringify({ selectedServer: "none" }) + ); + expect(readLegacyMigrationPayload()).toBeNull(); + }); + }); + + describe("OAuth key cleanup", () => { + it("clears mcp-discovery-* keys (current OAuth code) on success", async () => { + seedLegacyProjects(); + localStorage.setItem( + "mcp-discovery-stdio_one", + JSON.stringify({ resource: "https://example.com" }) + ); + localStorage.setItem( + "mcp-oauth-flow-state-stdio_one", + JSON.stringify({ state: "abc" }) + ); + const createProject = vi.fn().mockResolvedValue("convex-id"); + const result = await runLocalStateMigration({ + createProject: createProject as any, + }); + expect(result.ok).toBe(true); + expect(localStorage.getItem("mcp-discovery-stdio_one")).toBeNull(); + expect(localStorage.getItem("mcp-oauth-flow-state-stdio_one")).toBeNull(); + }); + }); + + describe("runLocalStateMigration", () => { 
+ it("no-ops when no legacy data, sets flag", async () => { + const createProject = vi.fn(); + const result = await runLocalStateMigration({ + createProject: createProject as any, + }); + expect(result).toEqual({ ok: true, projectsMigrated: 0, errors: [] }); + expect(createProject).not.toHaveBeenCalled(); + expect(hasMigrationCompleted()).toBe(true); + }); + + it("migrates each project, merges mcp-env into stdio config, clears legacy keys", async () => { + seedLegacyProjects(); + localStorage.setItem( + "mcp-env-stdio_one", + JSON.stringify({ TOKEN: "abc" }) + ); + localStorage.setItem("mcp-tokens-http_one", JSON.stringify({ a: 1 })); + localStorage.setItem("mcp-oauth-pending", "stdio_one"); + + const createProject = vi.fn().mockResolvedValue("convex-proj-id"); + const result = await runLocalStateMigration({ + createProject: createProject as any, + organizationId: "org_xyz", + }); + + expect(result.ok).toBe(true); + expect(result.projectsMigrated).toBe(1); + expect(createProject).toHaveBeenCalledTimes(1); + + const args = createProject.mock.calls[0][0]; + expect(args.name).toBe("Default"); + expect(args.organizationId).toBe("org_xyz"); + + const stdio = args.servers.stdio_one; + expect(stdio.config.command).toBe("node"); + // env merged: from-config plus from-env-key + expect(stdio.config.env).toEqual({ + FOO: "from-config", + TOKEN: "abc", + }); + + const http = args.servers.http_one; + expect(http.config.url).toBe("http://localhost:3000/mcp"); + expect(http.config.requestInit?.headers).toEqual({ "X-Test": "1" }); + + // Legacy keys cleared + expect(localStorage.getItem("mcp-inspector-projects")).toBeNull(); + expect(localStorage.getItem("mcp-env-stdio_one")).toBeNull(); + expect(localStorage.getItem("mcp-tokens-http_one")).toBeNull(); + expect(localStorage.getItem("mcp-oauth-pending")).toBeNull(); + // Flag set + expect(hasMigrationCompleted()).toBe(true); + }); + + it("preserves user-configured HTTP Authorization header through persistence", async () => { + // A 
self-hosted MCP server authenticated with a static bearer the user + // typed into the headers form. The persistence path must keep the + // Authorization entry, otherwise the migrated Convex project lacks the + // credential and reconnects fail after legacy localStorage is cleared. + // Sharing payloads still strip Authorization — covered separately by + // the share/clone flow. + localStorage.setItem( + "mcp-inspector-projects", + JSON.stringify({ + activeProjectId: "proj-a", + projects: { + "proj-a": { + id: "proj-a", + name: "Default", + createdAt: 1700000000000, + updatedAt: 1700000000000, + servers: { + http_static_auth: { + name: "http_static_auth", + enabled: true, + useOAuth: false, + config: { + url: "http://localhost:3000/mcp", + requestInit: { + headers: { + Authorization: "Bearer user-static-token", + "X-Other": "keep", + }, + }, + }, + connectionStatus: "disconnected", + retryCount: 0, + lastConnectionTime: 1700000000000, + }, + }, + }, + }, + }) + ); + + const createProject = vi.fn().mockResolvedValue("convex-id"); + const result = await runLocalStateMigration({ + createProject: createProject as any, + }); + expect(result.ok).toBe(true); + + const args = createProject.mock.calls[0][0]; + const http = args.servers.http_static_auth; + expect(http.config.requestInit?.headers).toEqual({ + Authorization: "Bearer user-static-token", + "X-Other": "keep", + }); + }); + + it("does NOT clear legacy keys on partial failure", async () => { + seedLegacyProjects(); + const createProject = vi + .fn() + .mockRejectedValue(new Error("Convex unreachable")); + const result = await runLocalStateMigration({ + createProject: createProject as any, + }); + + expect(result.ok).toBe(false); + expect(result.errors).toHaveLength(1); + expect(result.errors[0].projectName).toBe("Default"); + // Legacy data still there for retry + expect(localStorage.getItem("mcp-inspector-projects")).not.toBeNull(); + expect(hasMigrationCompleted()).toBe(false); + }); + + it("skips 
already-migrated projects on retry after partial failure", async () => { + // Seed two projects. First attempt: proj-a succeeds, proj-b fails. + localStorage.setItem( + "mcp-inspector-projects", + JSON.stringify({ + activeProjectId: "proj-a", + projects: { + "proj-a": { + id: "proj-a", + name: "Alpha", + createdAt: 1700000000000, + updatedAt: 1700000000000, + servers: {}, + }, + "proj-b": { + id: "proj-b", + name: "Beta", + createdAt: 1700000000000, + updatedAt: 1700000000000, + servers: {}, + }, + }, + }) + ); + + const createProjectFirst = vi + .fn() + .mockResolvedValueOnce("convex-a") + .mockRejectedValueOnce(new Error("Convex unreachable")); + const firstResult = await runLocalStateMigration({ + createProject: createProjectFirst as any, + }); + expect(firstResult.ok).toBe(false); + expect(firstResult.projectsMigrated).toBe(1); + expect(createProjectFirst).toHaveBeenCalledTimes(2); + // Progress recorded for the project that succeeded. + expect(localStorage.getItem(MIGRATION_PROGRESS_KEY)).toContain("proj-a"); + + // Retry: only the failed project should be re-attempted. + const createProjectRetry = vi.fn().mockResolvedValue("convex-b"); + const retryResult = await runLocalStateMigration({ + createProject: createProjectRetry as any, + }); + expect(retryResult.ok).toBe(true); + expect(retryResult.projectsMigrated).toBe(1); + expect(createProjectRetry).toHaveBeenCalledTimes(1); + expect(createProjectRetry.mock.calls[0][0].name).toBe("Beta"); + // Successful retry clears legacy + progress keys. 
+ expect(localStorage.getItem("mcp-inspector-projects")).toBeNull(); + expect(localStorage.getItem(MIGRATION_PROGRESS_KEY)).toBeNull(); + expect(hasMigrationCompleted()).toBe(true); + }); + }); +}); diff --git a/mcpjam-inspector/client/src/lib/__tests__/session-token.hosted-retry.test.ts b/mcpjam-inspector/client/src/lib/__tests__/session-token.hosted-retry.test.ts index 96abab529..bc237127d 100644 --- a/mcpjam-inspector/client/src/lib/__tests__/session-token.hosted-retry.test.ts +++ b/mcpjam-inspector/client/src/lib/__tests__/session-token.hosted-retry.test.ts @@ -114,6 +114,28 @@ describe("authFetch hosted 401 retry", () => { expect(posthog.capture).not.toHaveBeenCalled(); }); + it("does not retry when 401 is flagged X-MCP-Auth-Required: oauth", async () => { + // Resolver throws 401 when an OAuth-required server has no stored token. + // That's the upstream MCP server demanding the user complete its OAuth + // flow — refreshing the guest session would just hit the same 401. + vi.mocked(getHostedAuthorizationHeader).mockResolvedValueOnce( + "Bearer fine-token" + ); + + vi.mocked(global.fetch).mockResolvedValueOnce({ + status: 401, + ok: false, + headers: new Headers({ "X-MCP-Auth-Required": "oauth" }), + } as unknown as Response); + + const response = await authFetch("/api/mcp/connect", { method: "POST" }); + + expect(response.status).toBe(401); + expect(resetTokenCache).not.toHaveBeenCalled(); + expect(forceRefreshGuestSession).not.toHaveBeenCalled(); + expect(global.fetch).toHaveBeenCalledTimes(1); + }); + it("does not retry on non-401 errors", async () => { vi.mocked(getHostedAuthorizationHeader).mockResolvedValue( "Bearer some-token" diff --git a/mcpjam-inspector/client/src/lib/apis/web/context.ts b/mcpjam-inspector/client/src/lib/apis/web/context.ts index 43750cd22..2a538813b 100644 --- a/mcpjam-inspector/client/src/lib/apis/web/context.ts +++ b/mcpjam-inspector/client/src/lib/apis/web/context.ts @@ -52,7 +52,9 @@ function assertHostedClientConfigSynced() { 
} export function shouldRetryHostedAuth401(): boolean { - if (!HOSTED_MODE) return false; + // Retry the auth bootstrap on 401 whenever the actor isn't yet authenticated + // and no session is in flight — applies to both hosted guests and local CLI + // users post unification. return !hostedApiContext.isAuthenticated && !hostedApiContext.hasSession; } @@ -68,7 +70,9 @@ export function shouldRetryHostedAuth401(): boolean { * session is still resolving. */ function hasHostedGuestAccess(): boolean { - if (!HOSTED_MODE) return false; + // Now applies to both hosted and local: any actor without a WorkOS session + // gets guest access. The local CLI mints its own guest bearer via the same + // /api/web/guest-session endpoint hosted uses. if (hostedApiContext.isAuthenticated) return false; if (hostedApiContext.hasSession) return false; return true; @@ -91,10 +95,16 @@ export function setHostedApiContext(next: HostedApiContext | null): void { } /** - * Eagerly inject a server-name → server-ID mapping into the hosted context, + * Eagerly inject a server-name → server-ID mapping into the API context, * bridging the gap between when a Convex mutation completes and when the * reactive subscription propagates the update through React. * + * Applies to both hosted and local: post unification, local mode also drives + * connect/reconnect through the resolver path when a Convex serverId is known, + * so it benefits from the same eager injection. Without this, the immediate + * post-save connect would fall back to the legacy `{serverConfig, serverId}` + * shape for one tick. + * * The next `setHostedApiContext` call from the subscription will overwrite * this with identical data, so there is no risk of stale entries. 
*/ @@ -102,7 +112,6 @@ export function injectHostedServerMapping( serverName: string, serverId: string, ): void { - if (!HOSTED_MODE) return; hostedApiContext = { ...hostedApiContext, serverIdsByName: { @@ -126,6 +135,30 @@ export function getHostedProjectId(): string { return projectId; } +/** + * Mode-agnostic project + server resolution used by code paths that need to + * opt into the new `{projectId, serverId}` shape when context is populated, + * but fall back to legacy when it isn't (e.g., during the post-migration + * window or when a brand-new server hasn't been pushed to Convex yet). + * + * Returns null when either projectId is missing or the server name doesn't + * resolve to a Convex Id. Callers handle null by using the legacy shape. + */ +export function tryResolveProjectServer( + serverNameOrId: string, +): { projectId: string; serverId: string } | null { + const projectId = hostedApiContext.projectId; + if (!projectId) return null; + const direct = hostedApiContext.serverIdsByName[serverNameOrId]; + if (direct) return { projectId, serverId: direct }; + if ( + Object.values(hostedApiContext.serverIdsByName).includes(serverNameOrId) + ) { + return { projectId, serverId: serverNameOrId }; + } + return null; +} + /** * Long alphanumeric refs are usually Convex/legacy document ids. Never echo * them in user-visible error strings; short names and slugs may still be shown. @@ -398,8 +431,9 @@ export function buildHostedOAuthTokensMap( } export async function getHostedAuthorizationHeader(): Promise { - if (!HOSTED_MODE) return null; - + // Single bearer-resolution path for hosted and local. authFetch decides + // whether to attach the result based on the request's loopback/origin and + // whether a token is available; this function never short-circuits on mode. 
const now = Date.now(); if (cachedBearerToken && cachedBearerToken.expiresAt > now) { return `Bearer ${cachedBearerToken.token}`; diff --git a/mcpjam-inspector/client/src/lib/local-state-migration.ts b/mcpjam-inspector/client/src/lib/local-state-migration.ts new file mode 100644 index 000000000..95cc39fbe --- /dev/null +++ b/mcpjam-inspector/client/src/lib/local-state-migration.ts @@ -0,0 +1,511 @@ +/** + * One-time migration shim from legacy localStorage state to Convex. + * + * Reads, in priority order: `mcp-inspector-projects`, the older + * `mcp-inspector-workspaces`, and the still-older `mcp-inspector-state` + * (pre-projects format whose servers live at the top level). For each, pushes + * the resulting project(s) + servers to Convex via `projects:createProject` + * (which materializes the flat `servers` rows via `syncProjectServers`), then + * clears the legacy keys. + * + * **OAuth tokens are NOT migrated.** Hosted Convex stores tokens via the + * vault-backed `hostedOAuthCredentials` table and there is no bulk-import + * endpoint; the legacy `mcp-tokens-${name}` etc. localStorage tokens are + * cleared during migration and users re-authenticate OAuth-enabled servers + * on first connect post-migration. One-time UX cost vs. the cost of a new + * import endpoint + vault encryption path. + * + * Runs once per install. Gated by `mcp-inspector-migrated-to-convex` flag. + */ +import { serializeServersForPersistence } from "@/lib/project-serialization"; +import { + createLocalDefaultProject, + type Project, + type ServerWithName, +} from "@/state/app-types"; +import { isProjectClientConfig } from "@/lib/client-config"; + +export const MIGRATION_FLAG_KEY = "mcp-inspector-migrated-to-convex"; + +// Tracks IDs of projects that have already been pushed to Convex within the +// current migration attempt. Used so a partial failure can be retried without +// re-creating the projects that already succeeded (which would duplicate them +// in Convex on the next boot). 
+export const MIGRATION_PROGRESS_KEY = "mcp-inspector-migration-progress"; + +const LEGACY_PROJECTS_KEY = "mcp-inspector-projects"; +const LEGACY_WORKSPACES_KEY = "mcp-inspector-workspaces"; +const LEGACY_STATE_KEY = "mcp-inspector-state"; + +// Per-server OAuth keys are name-scoped. Migration scans the full localStorage +// for these prefixes rather than enumerating known names — handles partial +// migrations from older inspector versions that may have used different keys. +// Keep `mcp-oauth-flow-state-` and `mcp-discovery-` in sync with the live keys +// in `client/src/lib/oauth/mcp-oauth.ts`; the legacy `mcp-oauth-discovery-` +// prefix is retained for installs that ran an older inspector build. +const LEGACY_OAUTH_PREFIXES = [ + "mcp-tokens-", + "mcp-client-", + "mcp-verifier-", + "mcp-serverUrl-", + "mcp-oauth-config-", + "mcp-oauth-discovery-", + "mcp-discovery-", + "mcp-oauth-flow-state-", + "mcp-env-", +]; + +const LEGACY_OAUTH_SINGLETONS = ["mcp-oauth-pending", "mcp-oauth-return-hash"]; + +export function hasMigrationCompleted(): boolean { + if (typeof window === "undefined") return true; + try { + return !!localStorage.getItem(MIGRATION_FLAG_KEY); + } catch { + // Treat localStorage-blocked environments as already-migrated; nothing + // we can do for them anyway and we shouldn't keep retrying. + return true; + } +} + +interface LegacyMigrationPayload { + projects: Project[]; + envByName: Record>; +} + +/** + * Distinguishes "no legacy data to migrate" from "legacy data exists but we + * couldn't parse it." The migration runner needs to mark itself complete in + * the first case (so we don't keep re-reading on every boot) but must NOT + * mark complete in the second case (so a fix to the parsing path or a manual + * recovery can still pick the data up later). 
+ */ +export type LegacyReadResult = + | { kind: "empty" } + | { kind: "unreadable"; reason: string } + | { kind: "payload"; payload: LegacyMigrationPayload }; + + +function reviveServer(name: string, raw: any): ServerWithName | null { + if (!raw || typeof raw !== "object") return null; + const cfg: any = raw.config; + let nextCfg = cfg; + if (cfg && typeof cfg.url === "string") { + try { + nextCfg = { ...cfg, url: new URL(cfg.url) }; + } catch { + // ignore invalid URL — keep original string + } + } + return { + ...raw, + name: raw.name ?? name, + config: nextCfg, + connectionStatus: raw.connectionStatus || "disconnected", + retryCount: raw.retryCount || 0, + lastConnectionTime: raw.lastConnectionTime + ? new Date(raw.lastConnectionTime) + : new Date(), + enabled: raw.enabled !== false, + } as ServerWithName; +} + +/** + * Returns the parsed projects, or `null` when the legacy keys exist but + * couldn't be parsed (so the caller can surface "unreadable" instead of + * silently treating it as empty), or `[]` when neither key is present at all. + */ +function readProjectsFromLegacyStorage(): Project[] | null { + let raw: string | null; + try { + raw = + localStorage.getItem(LEGACY_PROJECTS_KEY) ?? + localStorage.getItem(LEGACY_WORKSPACES_KEY); + } catch { + return null; + } + if (!raw) return []; + + let parsed: any; + try { + parsed = JSON.parse(raw); + } catch { + return null; + } + + const rawProjects = parsed?.projects ?? 
parsed?.workspaces; + if (!rawProjects || typeof rawProjects !== "object") return null; + + const projects: Project[] = []; + for (const [id, projectRaw] of Object.entries(rawProjects)) { + const p: any = projectRaw; + if (!p || typeof p !== "object") continue; + const servers: Record = {}; + if (p.servers && typeof p.servers === "object") { + for (const [name, serverRaw] of Object.entries(p.servers)) { + const revived = reviveServer(name, serverRaw); + if (revived) servers[name] = revived; + } + } + projects.push({ + id, + name: typeof p.name === "string" && p.name.trim() ? p.name : "Untitled", + description: typeof p.description === "string" ? p.description : undefined, + icon: typeof p.icon === "string" ? p.icon : undefined, + clientConfig: isProjectClientConfig(p.clientConfig) + ? p.clientConfig + : undefined, + servers, + createdAt: p.createdAt ? new Date(p.createdAt) : new Date(), + updatedAt: p.updatedAt ? new Date(p.updatedAt) : new Date(), + isDefault: p.isDefault === true, + sharedProjectId: + typeof p.sharedProjectId === "string" ? p.sharedProjectId : undefined, + organizationId: + typeof p.organizationId === "string" ? p.organizationId : undefined, + visibility: p.visibility, + }); + } + return projects; +} + +// Pre-projects format: `mcp-inspector-state` stored servers at the top level. +// Mirrors the in-process lift in `state/storage.ts` so users on this format +// don't end up with the migration-complete flag set + nothing in Convex. +// +// Returns: +// - `Project` when STATE was readable and contained at least one server. +// - `null` when the STATE key wasn't present at all OR contained no servers. +// - the literal string "unreadable" when the key was present but unparseable. 
+function readProjectFromLegacyStateOnly(): + | Project + | null + | "unreadable" { + let raw: string | null; + try { + raw = localStorage.getItem(LEGACY_STATE_KEY); + } catch { + return "unreadable"; + } + if (!raw) return null; + + let parsed: any; + try { + parsed = JSON.parse(raw); + } catch { + return "unreadable"; + } + + const rawServers = parsed?.servers; + if (!rawServers || typeof rawServers !== "object") return null; + + const servers: Record = {}; + for (const [name, serverRaw] of Object.entries(rawServers)) { + const revived = reviveServer(name, serverRaw); + if (revived) servers[name] = revived; + } + if (Object.keys(servers).length === 0) return null; + + return createLocalDefaultProject({ servers }); +} + +/** + * Tri-state: distinguishes "no legacy data to migrate" (callers should mark + * complete so we stop retrying) from "legacy data exists but we can't read + * it" (callers must NOT mark complete — a parser fix or recovery should + * still be able to pick the data up later). + */ +export function readLegacyMigrationPayloadResult(): LegacyReadResult { + if (typeof window === "undefined") return { kind: "empty" }; + + const projectsRead = readProjectsFromLegacyStorage(); + + // If the newer projects/workspaces store is present but unparseable, stop + // here. Falling back to `mcp-inspector-state` and migrating that successfully + // would let `clearLegacyKeys()` delete the unreadable newer store along + // with the rest, losing data that a future parser fix or manual recovery + // could have salvaged. Surface as `unreadable` so the runner records the + // failure, leaves every legacy key in place, and retries later. 
+ if (projectsRead === null) { + return { + kind: "unreadable", + reason: "legacy projects/workspaces store is unreadable", + }; + } + + let projects: Project[] = projectsRead; + let unreadable: string | null = null; + + if (projects.length === 0) { + const stateResult = readProjectFromLegacyStateOnly(); + if (stateResult === "unreadable") { + unreadable = "legacy state store is unreadable"; + } else if (stateResult) { + projects = [stateResult]; + } + } + + // Pull mcp-env-${name} into a side map so the migration can merge env into + // STDIO server config before serialization. + const envByName: Record> = {}; + try { + for (let i = 0; i < localStorage.length; i++) { + const key = localStorage.key(i); + if (!key || !key.startsWith("mcp-env-")) continue; + const name = key.slice("mcp-env-".length); + try { + const value = localStorage.getItem(key); + if (!value) continue; + const parsed = JSON.parse(value); + if (parsed && typeof parsed === "object") { + envByName[name] = parsed; + } + } catch { + // ignore malformed env entries + } + } + } catch { + // localStorage blocked + } + + if (projects.length > 0) { + return { kind: "payload", payload: { projects, envByName } }; + } + if (unreadable) { + return { kind: "unreadable", reason: unreadable }; + } + // No projects, no STATE servers, no parse failures — and we already returned + // early on no-window. If a legacy key file was present but contained an + // empty `projects: {}` map, treat it as truly empty so we can mark complete. + return { kind: "empty" }; +} + +/** + * Convenience wrapper preserved for tests and any caller that doesn't care + * about the absent-vs-unreadable distinction. New callers should prefer + * `readLegacyMigrationPayloadResult` so they can decide whether to mark + * the migration complete. + */ +export function readLegacyMigrationPayload(): LegacyMigrationPayload | null { + const result = readLegacyMigrationPayloadResult(); + return result.kind === "payload" ? 
result.payload : null; +} + +export function clearLegacyKeys(): void { + if (typeof window === "undefined") return; + try { + localStorage.removeItem(LEGACY_PROJECTS_KEY); + localStorage.removeItem(LEGACY_WORKSPACES_KEY); + localStorage.removeItem(LEGACY_STATE_KEY); + for (const key of LEGACY_OAUTH_SINGLETONS) { + localStorage.removeItem(key); + } + // Iterate by snapshotting keys first — removeItem mid-iteration shifts + // indices on some implementations. + const toRemove: string[] = []; + for (let i = 0; i < localStorage.length; i++) { + const key = localStorage.key(i); + if (!key) continue; + if (LEGACY_OAUTH_PREFIXES.some((prefix) => key.startsWith(prefix))) { + toRemove.push(key); + } + } + for (const key of toRemove) { + localStorage.removeItem(key); + } + } catch { + // best-effort + } +} + +export function markMigrationComplete(): void { + if (typeof window === "undefined") return; + try { + localStorage.setItem(MIGRATION_FLAG_KEY, String(Date.now())); + } catch { + // best-effort + } +} + +function readMigratedProjectIds(): Set { + if (typeof window === "undefined") return new Set(); + try { + const raw = localStorage.getItem(MIGRATION_PROGRESS_KEY); + if (!raw) return new Set(); + const parsed = JSON.parse(raw); + if (Array.isArray(parsed)) { + return new Set(parsed.filter((v): v is string => typeof v === "string")); + } + } catch { + // ignore — treat as empty set; worst case we re-attempt a project + } + return new Set(); +} + +function recordMigratedProjectId(id: string): void { + if (typeof window === "undefined") return; + try { + const current = readMigratedProjectIds(); + current.add(id); + localStorage.setItem( + MIGRATION_PROGRESS_KEY, + JSON.stringify(Array.from(current)), + ); + } catch { + // best-effort + } +} + +function clearMigrationProgress(): void { + if (typeof window === "undefined") return; + try { + localStorage.removeItem(MIGRATION_PROGRESS_KEY); + } catch { + // best-effort + } +} + +export interface MigrationDeps { + 
createProject: (args: { + name: string; + description?: string; + icon?: string; + clientConfig?: unknown; + servers: Record; + organizationId?: string; + visibility?: "public" | "private"; + }) => Promise; + /** + * Org id to migrate into. Pass `undefined` to let Convex pick the actor's + * default org (works for both guests and signed-in users without an + * explicit selection). + */ + organizationId?: string; + logger?: { + info: (message: string, meta?: Record) => void; + warn: (message: string, meta?: Record) => void; + error: (message: string, meta?: Record) => void; + }; +} + +export interface MigrationResult { + ok: boolean; + projectsMigrated: number; + errors: Array<{ projectName: string; error: string }>; +} + +/** + * Execute the migration. Caller is responsible for gating on + * `hasMigrationCompleted()` and Convex auth readiness. + * + * Behavior: + * - If no legacy data: marks migration complete (no-op success). + * - For each project, calls `createProject` with `servers` containing the + * serialized form (incl. STDIO env merged from `mcp-env-${name}`). + * - On any per-project failure, the others still proceed; result.ok is + * `false` if any project failed. + * - On overall success, clears legacy keys and sets the flag. + * - On partial success (some projects failed), leaves legacy keys in place + * so a subsequent boot can retry — a partial migration shouldn't drop + * user data. Successful projects are recorded under + * `MIGRATION_PROGRESS_KEY` so the retry skips them and we don't duplicate + * them in Convex. + */ +export async function runLocalStateMigration( + deps: MigrationDeps +): Promise { + const result = readLegacyMigrationPayloadResult(); + if (result.kind === "empty") { + clearMigrationProgress(); + markMigrationComplete(); + return { ok: true, projectsMigrated: 0, errors: [] }; + } + if (result.kind === "unreadable") { + // Legacy data exists but we can't parse it. 
Don't mark complete — a + // future build with a fixed parser (or a manual recovery) should still + // be able to migrate this user. Surface as a one-element error list so + // the runner reports `ok: false` and the caller logs/retries. + deps.logger?.warn("Legacy migration payload unreadable", { + reason: result.reason, + }); + return { + ok: false, + projectsMigrated: 0, + errors: [ + { projectName: "(unreadable legacy state)", error: result.reason }, + ], + }; + } + const payload = result.payload; + + const errors: MigrationResult["errors"] = []; + let projectsMigrated = 0; + const alreadyMigrated = readMigratedProjectIds(); + + for (const project of payload.projects) { + if (alreadyMigrated.has(project.id)) { + deps.logger?.info("Skipping already-migrated project", { + name: project.name, + id: project.id, + }); + continue; + } + // Merge mcp-env-${name} into per-server config so syncProjectServers picks + // it up. STDIO servers may have env in either place; the merge is + // last-write-wins favoring the localStorage env entry (which is what the + // user actually configured most recently). + const projectServers: Record = {}; + for (const [name, server] of Object.entries(project.servers)) { + const env = payload.envByName[name]; + if (env && env !== undefined && (server.config as any)?.command) { + projectServers[name] = { + ...server, + config: { + ...(server.config as any), + env: { ...((server.config as any).env ?? 
{}), ...env }, + } as any, + }; + } else { + projectServers[name] = server; + } + } + + const serializedServers = serializeServersForPersistence(projectServers); + try { + await deps.createProject({ + name: project.name, + description: project.description, + icon: project.icon, + clientConfig: project.clientConfig, + servers: serializedServers, + organizationId: deps.organizationId, + visibility: project.visibility, + }); + recordMigratedProjectId(project.id); + projectsMigrated++; + deps.logger?.info("Migrated local project to Convex", { + name: project.name, + serverCount: Object.keys(projectServers).length, + }); + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + errors.push({ projectName: project.name, error: message }); + deps.logger?.error("Failed to migrate local project", { + name: project.name, + error: message, + }); + } + } + + if (errors.length === 0) { + clearLegacyKeys(); + clearMigrationProgress(); + markMigrationComplete(); + return { ok: true, projectsMigrated, errors }; + } + + // Partial success — keep legacy keys so we can retry. The + // MIGRATION_PROGRESS_KEY records which projects already succeeded so the + // next boot doesn't recreate them in Convex. + return { ok: false, projectsMigrated, errors }; +} diff --git a/mcpjam-inspector/client/src/lib/project-serialization.ts b/mcpjam-inspector/client/src/lib/project-serialization.ts index d13b1cd06..a58d10e59 100644 --- a/mcpjam-inspector/client/src/lib/project-serialization.ts +++ b/mcpjam-inspector/client/src/lib/project-serialization.ts @@ -1,7 +1,26 @@ import type { ServerWithName, ConnectionStatus } from "@/state/app-types"; -export function serializeServersForSharing( +type SerializeOptions = { + /** + * When true, drop secret-bearing fields (STDIO `env`, HTTP + * `Authorization` headers) from the output. When false, keep them + * verbatim. 
+ * + * Sharing payloads MUST redact: STDIO `env` commonly carries API + * keys / DB credentials, and HTTP `Authorization` carries bearers. + * Persistence payloads (the legacy localStorage → Convex migration) + * MUST preserve these — without them, a migrated STDIO server is + * non-functional and the user has to re-enter every credential, and + * an HTTP server configured with a static `Authorization` header + * (self-hosted MCP with a long-lived bearer, etc.) silently fails + * to reconnect after migration clears the legacy localStorage copy. + */ + redactSecrets: boolean; +}; + +function serializeServersInternal( servers: Record, + options: SerializeOptions, ): Record { const result: Record = {}; @@ -25,6 +44,8 @@ export function serializeServersForSharing( config.command = (server.config as any).command; if ((server.config as any).args) config.args = (server.config as any).args; + if (!options.redactSecrets && (server.config as any).env) + config.env = (server.config as any).env; if ((server.config as any).timeout) config.timeout = (server.config as any).timeout; if ((server.config as any).clientCapabilities) @@ -37,9 +58,13 @@ export function serializeServersForSharing( for (const [key, value] of Object.entries( (server.config as any).requestInit.headers, )) { - if (key.toLowerCase() !== "authorization") { - headers[key] = value as string; + if ( + options.redactSecrets && + key.toLowerCase() === "authorization" + ) { + continue; } + headers[key] = value as string; } requestInit.headers = headers; } @@ -66,6 +91,34 @@ export function serializeServersForSharing( return result; } +/** + * Serialize servers for an outbound share/invite payload (`ShareProjectDialog`, + * `use-project-state` clone-to-org / fork flows). Drops STDIO `env` so secrets + * stay on the local machine. 
+ */ +export function serializeServersForSharing( + servers: Record, +): Record { + return serializeServersInternal(servers, { redactSecrets: true }); +} + +/** + * Serialize servers for in-account persistence — currently only the + * legacy-localStorage → Convex migration. Preserves STDIO `env` because the + * migration target is the same actor's own Convex project (not a share + * recipient), and dropping env would leave migrated STDIO servers + * non-functional. + * + * Do NOT use this for any share/export/invite payload. If a future feature + * needs to copy a project across actors, route it through + * `serializeServersForSharing`. + */ +export function serializeServersForPersistence( + servers: Record, +): Record { + return serializeServersInternal(servers, { redactSecrets: false }); +} + export function deserializeServersFromConvex( servers: Record | any[], ): Record { diff --git a/mcpjam-inspector/client/src/lib/session-token.ts b/mcpjam-inspector/client/src/lib/session-token.ts index d34be0900..2be3477b8 100644 --- a/mcpjam-inspector/client/src/lib/session-token.ts +++ b/mcpjam-inspector/client/src/lib/session-token.ts @@ -18,6 +18,7 @@ import { resetTokenCache, shouldRetryHostedAuth401, } from "@/lib/apis/web/context"; +import { getConvexSiteUrl } from "@/lib/convex-site-url"; import { forceRefreshGuestSession } from "@/lib/guest-session"; import posthog from "posthog-js"; @@ -109,9 +110,10 @@ function buildAuthFetchInit( const sessionHeaders = shouldAttachSessionHeaders(input) ? getAuthHeaders() : undefined; - const hostedHeaders = hostedAuthorizationHeader - ? ({ Authorization: hostedAuthorizationHeader } as HeadersInit) - : undefined; + const hostedHeaders = + hostedAuthorizationHeader && shouldAttachHostedAuthorization(input) + ? 
({ Authorization: hostedAuthorizationHeader } as HeadersInit) + : undefined; return { ...init, @@ -214,6 +216,20 @@ function isLoopbackHostname(hostname: string): boolean { ); } +function resolveRequestUrl(input: RequestInfo | URL): URL | null { + const baseOrigin = + typeof window !== "undefined" ? window.location.origin : "http://localhost"; + try { + return input instanceof URL + ? input + : typeof Request !== "undefined" && input instanceof Request + ? new URL(input.url, baseOrigin) + : new URL(String(input), baseOrigin); + } catch { + return null; + } +} + // The session token is a single-process secret for the local CLI/Inspector // build; only attach it to loopback `/api/*` calls. Non-hosted Inspector is // not supported behind a public origin — relaxing this would expose the token @@ -223,21 +239,86 @@ function shouldAttachSessionHeaders(input: RequestInfo | URL): boolean { return false; } - const baseOrigin = - typeof window !== "undefined" ? window.location.origin : "http://localhost"; + const parsed = resolveRequestUrl(input); + if (parsed) { + return isLoopbackHostname(parsed.hostname) && parsed.pathname.startsWith("/api/"); + } + return typeof input === "string" && input.startsWith("/api/"); +} - try { - const parsed = - input instanceof URL - ? input - : typeof Request !== "undefined" && input instanceof Request - ? new URL(input.url, baseOrigin) - : new URL(String(input), baseOrigin); +// Paths that need the hosted (Convex) Authorization bearer attached. In +// hosted mode every `/api/web/*` route is Convex-backed; in local mode the +// inspector forwards the bearer for routes that re-call Convex +// (`/web/authorize-batch-local`, OAuth bookkeeping). Anything not listed +// here — `/api/session-token`, `/api/health`, the local-only MCP read paths +// — does NOT participate in Convex auth, so we don't want to mint or refresh +// a guest session for those calls. 
+// +// `/api/web/*` paths are same-origin (proxied by the inspector's own Hono +// server). The `/web/oauth/` paths cover absolute Convex HTTP-action URLs +// (`https://*.convex.site/web/oauth/...`) that the OAuth flow hits directly +// — gated by the same-origin/Convex-host check below so the bearer never +// crosses to a foreign origin. +const HOSTED_AUTH_PATH_PREFIXES = [ + "/api/web/", + // Local resolver path that calls Convex /web/authorize-batch-local. + "/api/mcp/connect", + "/api/mcp/servers/reconnect", + // Convex HTTP actions called via absolute URL (OAuth completion, etc.). + "/web/oauth/", +]; - return isLoopbackHostname(parsed.hostname) && parsed.pathname.startsWith("/api/"); - } catch { - return typeof input === "string" && input.startsWith("/api/"); +/** + * Returns true when `parsed` is safe to receive a hosted Authorization + * header — same origin as the app, a loopback host, or the configured + * Convex `*.convex.site` hostname. Without this, an absolute foreign URL + * matching one of the path prefixes would receive the bearer (credential + * exfiltration risk). + */ +function isHostedAuthAllowedOrigin(parsed: URL): boolean { + if ( + typeof window !== "undefined" && + parsed.origin === window.location.origin + ) { + return true; + } + if (isLoopbackHostname(parsed.hostname)) return true; + const convexSite = getConvexSiteUrl(); + if (convexSite) { + try { + const convexHost = new URL(convexSite).hostname; + if (parsed.hostname === convexHost) return true; + } catch { + // Malformed configured URL — fall through to deny. 
+ } + } + return false; +} + +function pathMatchesHostedPrefix(pathname: string): boolean { + return HOSTED_AUTH_PATH_PREFIXES.some((prefix) => { + if (prefix.endsWith("/")) return pathname.startsWith(prefix); + // Non-trailing-slash entries match the literal path AND any sub-path + // (`/api/mcp/connect`, `/api/mcp/connect/`, `/api/mcp/connect/foo`) so a + // browser/proxy normalization or future sub-route doesn't silently drop + // the bearer. `/api/mcp/connecting` still won't match — boundary is `/`. + return pathname === prefix || pathname.startsWith(`${prefix}/`); + }); +} + +function shouldAttachHostedAuthorization(input: RequestInfo | URL): boolean { + const parsed = resolveRequestUrl(input); + // Relative paths starting with "/" resolve same-origin via resolveRequestUrl + // (which uses window.location.origin). For odd inputs that don't parse, + // fall back to a literal pathname match — but only for relative paths, + // since an unparseable absolute URL shouldn't get credentials. + if (parsed) { + if (!isHostedAuthAllowedOrigin(parsed)) return false; + return pathMatchesHostedPrefix(parsed.pathname); } + if (typeof input !== "string" || !input.startsWith("/")) return false; + const pathname = input.split("?")[0]; + return pathMatchesHostedPrefix(pathname); } /** @@ -293,16 +374,30 @@ export async function authFetch( init?: RequestInit ): Promise { const surface = resolveAuthFetchSurface(input); - const hostedAuthHeader = await getHostedAuthorizationHeader(); const callerProvidedAuthorization = hasAuthorizationHeader(init?.headers); + // Only resolve the hosted bearer for paths that actually call Convex on + // the user's behalf. Skipping this for unrelated local paths + // (`/api/session-token`, `/api/health`, local-only MCP read paths) means + // those calls don't block on minting a guest session at cold boot and + // don't trigger guest refresh on unrelated 401s. 
+ const hostedAuthEligible = shouldAttachHostedAuthorization(input); + const hostedAuthHeader = hostedAuthEligible + ? await getHostedAuthorizationHeader() + : null; const mergedInit = buildAuthFetchInit(input, init, hostedAuthHeader); const response = await fetch(input, mergedInit); + // Retry on 401 only for paths we actually attached a hosted bearer to — + // a 401 from `/api/health` shouldn't trigger a guest-session refresh. + // Also skip when the server flagged the 401 as OAuth-required: that's the + // upstream MCP server demanding the user complete its OAuth flow, not a + // session-auth failure, and a guest refresh would just hit the same 401. if ( response.status !== 401 || - !HOSTED_MODE || + !hostedAuthEligible || !shouldRetryHostedAuth401() || - callerProvidedAuthorization + callerProvidedAuthorization || + response.headers?.get("X-MCP-Auth-Required") === "oauth" ) { return response; } diff --git a/mcpjam-inspector/client/src/state/__tests__/mcp-api.local.test.ts b/mcpjam-inspector/client/src/state/__tests__/mcp-api.local.test.ts new file mode 100644 index 000000000..830f71b23 --- /dev/null +++ b/mcpjam-inspector/client/src/state/__tests__/mcp-api.local.test.ts @@ -0,0 +1,105 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { MCPServerConfig } from "@mcpjam/sdk/browser"; + +const authFetchMock = vi.fn(); + +vi.mock("@/lib/config", () => ({ + HOSTED_MODE: false, +})); + +vi.mock("@/lib/session-token", () => ({ + authFetch: (...args: unknown[]) => authFetchMock(...args), +})); + +import { reconnectServer, testConnection } from "../mcp-api"; + +function readBody(): Record { + expect(authFetchMock).toHaveBeenCalledTimes(1); + const init = authFetchMock.mock.calls[0]?.[1] as RequestInit | undefined; + return JSON.parse(String(init?.body ?? 
"{}")); +} + +describe("mcp-api local-mode legacy fallback (OAuth bearer present)", () => { + beforeEach(() => { + authFetchMock.mockReset(); + authFetchMock.mockResolvedValue( + new Response(JSON.stringify({ success: true }), { status: 200 }), + ); + }); + + it("testConnection sends the display name as serverId when falling back to legacy because of a local OAuth bearer", async () => { + const config = { + url: "http://localhost:8787/mcp", + requestInit: { + headers: { + Authorization: "Bearer local-access-token", + }, + }, + } as unknown as MCPServerConfig; + + await testConnection(config, "convex_id_abc123", { + projectId: "project_xyz", + serverName: "mcpjam local", + }); + + const body = readBody(); + // Resolver path is suppressed — the runtime config carries a local + // bearer that Convex doesn't yet hold. + expect(body.projectId).toBeUndefined(); + expect(body.serverConfig).toBeDefined(); + // The legacy server-side path uses serverId as the mcpClientManager key; + // it must be the display name, not the resolved Convex `_id`. 
+ expect(body.serverId).toBe("mcpjam local"); + }); + + it("reconnectServer sends the display name as serverId when falling back to legacy because of a local OAuth bearer", async () => { + const config = { + url: "http://localhost:8787/mcp", + requestInit: { + headers: { + Authorization: "Bearer local-access-token", + }, + }, + } as unknown as MCPServerConfig; + + await reconnectServer("convex_id_abc123", config, { + projectId: "project_xyz", + serverName: "mcpjam local", + }); + + const body = readBody(); + expect(body.projectId).toBeUndefined(); + expect(body.serverConfig).toBeDefined(); + expect(body.serverId).toBe("mcpjam local"); + }); + + it("testConnection still uses the resolver body when no local OAuth bearer is present", async () => { + const config = { + url: "http://localhost:8787/mcp", + } as unknown as MCPServerConfig; + + await testConnection(config, "convex_id_abc123", { + projectId: "project_xyz", + serverName: "mcpjam local", + }); + + const body = readBody(); + expect(body.projectId).toBe("project_xyz"); + expect(body.serverId).toBe("convex_id_abc123"); + expect(body.serverName).toBe("mcpjam local"); + expect(body.serverConfig).toBeUndefined(); + }); + + it("testConnection without options preserves the legacy 2-arg shape (serverId == display name)", async () => { + const config = { + url: "http://localhost:8787/mcp", + } as unknown as MCPServerConfig; + + await testConnection(config, "mcpjam local"); + + const body = readBody(); + expect(body.serverId).toBe("mcpjam local"); + expect(body.serverConfig).toBeDefined(); + expect(body.projectId).toBeUndefined(); + }); +}); diff --git a/mcpjam-inspector/client/src/state/mcp-api.ts b/mcpjam-inspector/client/src/state/mcp-api.ts index aa5f91b7b..d01613039 100644 --- a/mcpjam-inspector/client/src/state/mcp-api.ts +++ b/mcpjam-inspector/client/src/state/mcp-api.ts @@ -7,6 +7,7 @@ import { type HostedServerValidateResponse, } from "@/lib/apis/web/servers-api"; import { BootstrapNotReadyError } from 
"@/lib/app-ready"; +import type { ConnectionDefaults } from "@/shared/connection-defaults"; const HOSTED_VALIDATE_TIMEOUT_MS = 20_000; @@ -118,20 +119,90 @@ async function withTimeout( }); } + +function buildResolverBody( + serverId: string, + options: { + projectId: string; + serverName?: string; + connectionDefaults?: ConnectionDefaults; + }, +): Record { + return { + projectId: options.projectId, + serverId, + ...(options.serverName ? { serverName: options.serverName } : {}), + ...(options.connectionDefaults + ? { connectionDefaults: options.connectionDefaults } + : {}), + }; +} + +/** + * Local OAuth tokens currently live in localStorage (the local OAuth provider + * has not been moved to Convex), but the resolver path expects Convex to + * supply `oauthAccessToken` and rejects `useOAuth` servers without one. To + * avoid breaking synced local OAuth servers, fall back to the legacy + * `{serverConfig, serverId}` body whenever the runtime config carries a local + * `Authorization: Bearer …` header. The legacy path forwards the header + * straight through to the spawned client. + */ +function hasLocalOAuthBearer(serverConfig: MCPServerConfig): boolean { + const headers = (serverConfig as { requestInit?: { headers?: unknown } }) + ?.requestInit?.headers; + if (!headers || typeof headers !== "object") return false; + for (const [key, value] of Object.entries(headers as Record)) { + if ( + key.toLowerCase() === "authorization" && + typeof value === "string" && + value.toLowerCase().startsWith("bearer ") + ) { + return true; + } + } + return false; +} + export async function testConnection( serverConfig: MCPServerConfig, serverId: string, + options?: { + projectId?: string; + serverName?: string; + connectionDefaults?: ConnectionDefaults; + }, ) { if (HOSTED_MODE) { return safeValidateHostedServer(serverId, serverConfig); } + // When projectId is provided, the server resolves config + tokens from + // Convex via /web/authorize-batch-local. 
Without it (or when the runtime + // serverConfig carries a local OAuth bearer that Convex doesn't yet hold), + // fall back to the legacy {serverConfig, serverId} body so the local token + // travels with the request. + const useResolver = + !!options?.projectId && !hasLocalOAuthBearer(serverConfig); + // The legacy server-side path uses `serverId` as the mcpClientManager key. + // When the caller resolved a Convex `_id` for the resolver path but we end + // up taking the legacy fallback (local OAuth bearer present), prefer the + // display name so the manager doesn't end up with a duplicate entry keyed + // by the Convex `_id` alongside the existing display-name entry. + const legacyServerId = options?.serverName ?? serverId; + const body: Record = useResolver + ? buildResolverBody(serverId, { + projectId: options!.projectId!, + serverName: options?.serverName, + connectionDefaults: options?.connectionDefaults, + }) + : { serverConfig, serverId: legacyServerId }; + const res = await authFetchWithTimeout( "/api/mcp/connect", { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ serverConfig, serverId }), + body: JSON.stringify(body), }, 20000, // 20 second timeout ); @@ -165,17 +236,35 @@ export async function listServers() { export async function reconnectServer( serverId: string, serverConfig: MCPServerConfig, + options?: { + projectId?: string; + serverName?: string; + connectionDefaults?: ConnectionDefaults; + }, ) { if (HOSTED_MODE) { return safeValidateHostedServer(serverId, serverConfig); } + const useResolver = + !!options?.projectId && !hasLocalOAuthBearer(serverConfig); + // See testConnection: prefer the display name for the legacy body so we + // don't create a phantom mcpClientManager entry keyed by the Convex `_id`. + const legacyServerId = options?.serverName ?? serverId; + const body: Record = useResolver + ? 
buildResolverBody(serverId, { + projectId: options!.projectId!, + serverName: options?.serverName, + connectionDefaults: options?.connectionDefaults, + }) + : { serverId: legacyServerId, serverConfig }; + const res = await authFetchWithTimeout( "/api/mcp/servers/reconnect", { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ serverId, serverConfig }), + body: JSON.stringify(body), }, 20000, // 20 second timeout ); diff --git a/mcpjam-inspector/server/routes/mcp/__tests__/connect.test.ts b/mcpjam-inspector/server/routes/mcp/__tests__/connect.test.ts index ed076ae79..f0dbf8c3b 100644 --- a/mcpjam-inspector/server/routes/mcp/__tests__/connect.test.ts +++ b/mcpjam-inspector/server/routes/mcp/__tests__/connect.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from "vitest"; +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import type { Hono } from "hono"; import { createMockMcpClientManager, @@ -6,212 +6,433 @@ import { type MockMCPClientManager, } from "./helpers/index.js"; +const PROJECT_ID = "proj_test"; +const SERVER_ID = "srv_test"; +const SERVER_NAME = "test-server"; + +function authHeaders() { + return { + "Content-Type": "application/json", + Authorization: "Bearer guest-bearer-test", + }; +} + +function mockBatchAuthorizeFetch( + responseBody: unknown, + init?: { status?: number } +) { + const status = init?.status ?? 
200; + vi.stubGlobal( + "fetch", + vi.fn(async () => { + return new Response(JSON.stringify(responseBody), { + status, + headers: { "Content-Type": "application/json" }, + }); + }) + ); +} + describe("POST /api/mcp/connect", () => { let mcpClientManager: MockMCPClientManager; let app: Hono; + const originalConvexUrl = process.env.CONVEX_HTTP_URL; beforeEach(() => { vi.clearAllMocks(); + process.env.CONVEX_HTTP_URL = "https://convex.example"; mcpClientManager = createMockMcpClientManager(); app = createTestApp(mcpClientManager, "connect"); }); + afterEach(() => { + vi.unstubAllGlobals(); + if (originalConvexUrl === undefined) delete process.env.CONVEX_HTTP_URL; + else process.env.CONVEX_HTTP_URL = originalConvexUrl; + }); + describe("validation", () => { - it("returns 400 when serverConfig is missing", async () => { + it("returns 400 when serverId is missing", async () => { const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ serverId: "test-server" }), + headers: authHeaders(), + body: JSON.stringify({ projectId: PROJECT_ID }), }); expect(res.status).toBe(400); - const data = await res.json(); - expect(data.success).toBe(false); - expect(data.error).toBe("serverConfig is required"); + const data = (await res.json()) as { error?: string }; + expect(data.error).toBe("serverId is required"); }); - it("returns 400 when serverId is missing", async () => { + it("returns 400 when resolver-path body is missing serverName", async () => { + const res = await app.request("/api/mcp/connect", { + method: "POST", + headers: authHeaders(), + body: JSON.stringify({ projectId: PROJECT_ID, serverId: SERVER_ID }), + }); + + expect(res.status).toBe(400); + const data = (await res.json()) as { error?: string }; + expect(data.error).toBe("serverName is required with projectId"); + }); + + it("returns 401 when projectId set but Authorization bearer is missing", async () => { const res = await 
app.request("/api/mcp/connect", { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ serverConfig: { command: "node" } }), + body: JSON.stringify({ + projectId: PROJECT_ID, + serverId: SERVER_ID, + serverName: SERVER_NAME, + }), + }); + + expect(res.status).toBe(401); + }); + + it("returns 400 when neither projectId nor serverConfig is provided", async () => { + const res = await app.request("/api/mcp/connect", { + method: "POST", + headers: authHeaders(), + body: JSON.stringify({ serverId: SERVER_ID }), }); expect(res.status).toBe(400); - const data = await res.json(); - expect(data.success).toBe(false); - expect(data.error).toBe("serverId is required"); + const data = (await res.json()) as { error?: string }; + expect(data.error).toBe("serverConfig is required"); }); it("returns 400 when request body is invalid JSON", async () => { const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: authHeaders(), body: "invalid-json", }); expect(res.status).toBe(400); - const data = await res.json(); - expect(data.success).toBe(false); - expect(data.error).toBe("Failed to parse request body"); }); }); - describe("STDIO connection", () => { - it("connects successfully with command config", async () => { + describe("legacy {serverConfig, serverId} body shape (transitional)", () => { + it("connects with legacy STDIO serverConfig", async () => { const res = await app.request("/api/mcp/connect", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ - serverId: "test-server", - serverConfig: { - command: "node", - args: ["server.js"], - }, + serverId: SERVER_ID, + serverConfig: { command: "node", args: ["server.js"] }, }), }); expect(res.status).toBe(200); - const data = await res.json(); - expect(data.success).toBe(true); - expect(data.status).toBe("connected"); - - // Verify MCPClientManager was called - 
expect(mcpClientManager.disconnectServer).toHaveBeenCalledWith( - "test-server", - ); expect(mcpClientManager.connectToServer).toHaveBeenCalledWith( - "test-server", - { command: "node", args: ["server.js"] }, + SERVER_ID, + { command: "node", args: ["server.js"] } ); }); - }); - describe("HTTP/SSE connection", () => { - it("connects successfully with URL string config", async () => { + it("connects with legacy HTTP serverConfig (URL string)", async () => { const res = await app.request("/api/mcp/connect", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ - serverId: "http-server", - serverConfig: { - url: "http://localhost:3000/mcp", - }, + serverId: SERVER_ID, + serverConfig: { url: "http://localhost:3000/mcp" }, }), }); expect(res.status).toBe(200); - const data = await res.json(); - expect(data.success).toBe(true); - expect(data.status).toBe("connected"); - - // Verify URL was converted to URL object - expect(mcpClientManager.connectToServer).toHaveBeenCalledWith( - "http-server", - expect.objectContaining({ - url: expect.any(URL), - }), - ); - const callArgs = mcpClientManager.connectToServer.mock.calls[0][1]; expect(callArgs.url.href).toBe("http://localhost:3000/mcp"); }); + }); + + describe("STDIO connection", () => { + it("resolves STDIO config from Convex and connects", async () => { + mockBatchAuthorizeFetch({ + results: { + [SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "node", + args: ["server.js"], + env: { FOO: "bar" }, + }, + }, + }, + }); - it("connects successfully with URL object config (href)", async () => { const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: authHeaders(), body: JSON.stringify({ - serverId: "http-server", - serverConfig: { - url: { href: "http://localhost:4000/api" }, - }, + projectId: 
PROJECT_ID, + serverId: SERVER_ID, + serverName: SERVER_NAME, }), }); expect(res.status).toBe(200); - const data = await res.json(); + const data = (await res.json()) as { success: boolean; status: string }; expect(data.success).toBe(true); + expect(data.status).toBe("connected"); - const callArgs = mcpClientManager.connectToServer.mock.calls[0][1]; - expect(callArgs.url.href).toBe("http://localhost:4000/api"); + // Manager is keyed by display name, not the Convex serverId — the rest + // of the local API surface (tools list/execute, status) passes display + // names from the UI. + expect(mcpClientManager.disconnectServer).toHaveBeenCalledWith( + SERVER_NAME + ); + const callArgs = mcpClientManager.connectToServer.mock.calls[0]; + expect(callArgs[0]).toBe(SERVER_NAME); + expect(callArgs[1]).toMatchObject({ + command: "node", + args: ["server.js"], + env: { FOO: "bar" }, + }); }); }); - describe("connection errors", () => { - it("returns 500 when connection fails", async () => { - mcpClientManager.connectToServer.mockRejectedValue( - new Error("Connection refused"), - ); + describe("HTTP connection", () => { + it("resolves HTTP config and attaches OAuth bearer when present", async () => { + mockBatchAuthorizeFetch({ + results: { + [SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "http", + url: "http://localhost:3000/mcp", + headers: { "X-Foo": "bar" }, + useOAuth: true, + }, + oauthAccessToken: "oauth-token-123", + }, + }, + }); const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: authHeaders(), body: JSON.stringify({ - serverId: "failing-server", - serverConfig: { command: "nonexistent" }, + projectId: PROJECT_ID, + serverId: SERVER_ID, + serverName: SERVER_NAME, }), }); - expect(res.status).toBe(500); - const data = await res.json(); - expect(data.success).toBe(false); - 
expect(data.error).toContain( - "Connection failed for server failing-server", - ); - expect(data.details).toBe("Connection refused"); - expect(mcpClientManager.removeServer).toHaveBeenCalledWith( - "failing-server", - ); + expect(res.status).toBe(200); + const callArgs = mcpClientManager.connectToServer.mock.calls[0]; + expect(callArgs[0]).toBe(SERVER_NAME); + // Resolver wraps the URL string in a URL object to match the legacy + // connect path's shape (`new URL(...)`), so assert against `.href`. + expect(callArgs[1].url).toBeInstanceOf(URL); + expect(callArgs[1].url.href).toBe("http://localhost:3000/mcp"); + // OAuth bearer merged into requestInit.headers + expect(callArgs[1].requestInit.headers).toMatchObject({ + "X-Foo": "bar", + Authorization: "Bearer oauth-token-123", + }); }); - it("disconnects existing connection before reconnecting", async () => { + it("merges connectionDefaults (project headers, timeout, capabilities) onto resolved config", async () => { + mockBatchAuthorizeFetch({ + results: { + [SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "http", + url: "http://localhost:3000/mcp", + headers: { "X-Server-Default": "from-convex" }, + useOAuth: false, + }, + }, + }, + }); + const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: authHeaders(), body: JSON.stringify({ - serverId: "existing-server", - serverConfig: { command: "node" }, + projectId: PROJECT_ID, + serverId: SERVER_ID, + serverName: SERVER_NAME, + connectionDefaults: { + headers: { + "X-Project-Default": "from-runtime", + "X-Server-Default": "overridden-by-runtime", + }, + timeoutMs: 12345, + clientCapabilities: { sampling: { strategy: "auto" } }, + }, }), }); expect(res.status).toBe(200); + const callArgs = mcpClientManager.connectToServer.mock.calls[0]; + const cfg = callArgs[1]; + // Project-default headers overlay 
Convex-stored server headers; the + // server-default value is overridden by the runtime overlay. + expect(cfg.requestInit.headers).toMatchObject({ + "X-Project-Default": "from-runtime", + "X-Server-Default": "overridden-by-runtime", + }); + expect(cfg.timeout).toBe(12345); + expect(cfg.clientCapabilities).toEqual({ + sampling: { strategy: "auto" }, + }); + }); - // Verify disconnect was called before connect - const disconnectOrder = - mcpClientManager.disconnectServer.mock.invocationCallOrder[0]; - const connectOrder = - mcpClientManager.connectToServer.mock.invocationCallOrder[0]; - expect(disconnectOrder).toBeLessThan(connectOrder); + it("returns 401 when server requires OAuth but no token resolved", async () => { + mockBatchAuthorizeFetch({ + results: { + [SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "http", + url: "http://localhost:3000/mcp", + headers: {}, + useOAuth: true, + }, + // no oauthAccessToken + }, + }, + }); + + const res = await app.request("/api/mcp/connect", { + method: "POST", + headers: authHeaders(), + body: JSON.stringify({ projectId: PROJECT_ID, serverId: SERVER_ID, serverName: SERVER_NAME }), + }); + + expect(res.status).toBe(401); + const data = (await res.json()) as { oauthRequired?: boolean }; + expect(data.oauthRequired).toBe(true); }); }); - describe("edge cases", () => { - it("handles empty serverConfig gracefully", async () => { + describe("authorization failures", () => { + it("propagates 403 from Convex when actor lacks access", async () => { + mockBatchAuthorizeFetch({ + results: { + [SERVER_ID]: { + ok: false, + status: 403, + code: "FORBIDDEN", + message: "Not a member of project", + }, + }, + }); + const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - serverId: "test", - serverConfig: {}, - }), + headers: authHeaders(), + body: 
JSON.stringify({ projectId: PROJECT_ID, serverId: SERVER_ID, serverName: SERVER_NAME }), }); - expect(res.status).toBe(200); - expect(mcpClientManager.connectToServer).toHaveBeenCalledWith("test", {}); + expect(res.status).toBe(403); }); - it("handles serverConfig with undefined url", async () => { + it("returns 502 when Convex is unreachable", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async () => { + throw new Error("network down"); + }) + ); + const res = await app.request("/api/mcp/connect", { method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - serverId: "test", - serverConfig: { url: undefined, command: "node" }, - }), + headers: authHeaders(), + body: JSON.stringify({ projectId: PROJECT_ID, serverId: SERVER_ID, serverName: SERVER_NAME }), + }); + + expect(res.status).toBe(502); + }); + }); + + describe("connection errors", () => { + it("returns 500 and removes server when manager.connectToServer throws", async () => { + mockBatchAuthorizeFetch({ + results: { + [SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "nonexistent", + args: [], + env: {}, + }, + }, + }, + }); + mcpClientManager.connectToServer.mockRejectedValue( + new Error("Connection refused") + ); + + const res = await app.request("/api/mcp/connect", { + method: "POST", + headers: authHeaders(), + body: JSON.stringify({ projectId: PROJECT_ID, serverId: SERVER_ID, serverName: SERVER_NAME }), + }); + + expect(res.status).toBe(500); + const data = (await res.json()) as { + error?: string; + details?: string; + }; + expect(data.error).toContain( + `Connection failed for server ${SERVER_NAME}` + ); + expect(data.details).toBe("Connection refused"); + expect(mcpClientManager.removeServer).toHaveBeenCalledWith(SERVER_NAME); + }); + + it("disconnects existing connection before reconnecting", async () => { + mockBatchAuthorizeFetch({ + 
results: { + [SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "node", + args: [], + env: {}, + }, + }, + }, + }); + + const res = await app.request("/api/mcp/connect", { + method: "POST", + headers: authHeaders(), + body: JSON.stringify({ projectId: PROJECT_ID, serverId: SERVER_ID, serverName: SERVER_NAME }), }); expect(res.status).toBe(200); + const disconnectOrder = + mcpClientManager.disconnectServer.mock.invocationCallOrder[0]; + const connectOrder = + mcpClientManager.connectToServer.mock.invocationCallOrder[0]; + expect(disconnectOrder).toBeLessThan(connectOrder); }); }); }); diff --git a/mcpjam-inspector/server/routes/mcp/__tests__/servers.test.ts b/mcpjam-inspector/server/routes/mcp/__tests__/servers.test.ts index f6fe61df9..273f6fccd 100644 --- a/mcpjam-inspector/server/routes/mcp/__tests__/servers.test.ts +++ b/mcpjam-inspector/server/routes/mcp/__tests__/servers.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from "vitest"; +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; import { Hono } from "hono"; import servers from "../servers.js"; @@ -266,135 +266,282 @@ describe("DELETE /api/mcp/servers/:serverId", () => { describe("POST /api/mcp/servers/reconnect", () => { let mcpClientManager: ReturnType; let app: Hono; + const originalConvexUrl = process.env.CONVEX_HTTP_URL; + const RECONNECT_PROJECT_ID = "proj_reconnect"; + const RECONNECT_SERVER_ID = "srv_doc_id"; + const RECONNECT_SERVER_NAME = "server-1"; + + function reconnectAuthHeaders() { + return { + "Content-Type": "application/json", + Authorization: "Bearer guest-bearer-test", + }; + } + + function mockBatchAuthorize(responseBody: unknown, init?: { status?: number }) { + const status = init?.status ?? 
200; + vi.stubGlobal( + "fetch", + vi.fn(async () => { + return new Response(JSON.stringify(responseBody), { + status, + headers: { "Content-Type": "application/json" }, + }); + }) + ); + } beforeEach(() => { vi.clearAllMocks(); + process.env.CONVEX_HTTP_URL = "https://convex.example"; mcpClientManager = createMockMcpClientManager(); app = createApp(mcpClientManager); }); + afterEach(() => { + vi.unstubAllGlobals(); + if (originalConvexUrl === undefined) delete process.env.CONVEX_HTTP_URL; + else process.env.CONVEX_HTTP_URL = originalConvexUrl; + }); + describe("validation", () => { it("returns 400 when serverId is missing", async () => { const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ serverConfig: { command: "node" } }), + headers: reconnectAuthHeaders(), + body: JSON.stringify({}), }); expect(res.status).toBe(400); - const data = await res.json(); - expect(data.success).toBe(false); - expect(data.error).toBe("serverId and serverConfig are required"); }); - it("returns 400 when serverConfig is missing", async () => { + it("returns 400 when resolver-path body is missing serverName", async () => { + const res = await app.request("/api/mcp/servers/reconnect", { + method: "POST", + headers: reconnectAuthHeaders(), + body: JSON.stringify({ + projectId: RECONNECT_PROJECT_ID, + serverId: RECONNECT_SERVER_ID, + }), + }); + + expect(res.status).toBe(400); + const data = (await res.json()) as { error?: string }; + expect(data.error).toBe("serverName is required with projectId"); + }); + + it("returns 401 when projectId set but Authorization bearer is missing", async () => { + const res = await app.request("/api/mcp/servers/reconnect", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + projectId: RECONNECT_PROJECT_ID, + serverId: RECONNECT_SERVER_ID, + serverName: RECONNECT_SERVER_NAME, + }), + }); + + 
expect(res.status).toBe(401); + }); + + it("returns 400 when neither projectId nor serverConfig is provided", async () => { const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ serverId: "server-1" }), + body: JSON.stringify({ serverId: RECONNECT_SERVER_ID }), }); expect(res.status).toBe(400); - const data = await res.json(); - expect(data.success).toBe(false); + const data = (await res.json()) as { error?: string }; + expect(data.error).toBe("serverId and serverConfig are required"); }); }); - describe("success cases", () => { - it("reconnects successfully with STDIO config", async () => { + describe("legacy {serverConfig} body shape (transitional)", () => { + it("reconnects with legacy STDIO serverConfig", async () => { const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ - serverId: "server-1", + serverId: RECONNECT_SERVER_ID, serverConfig: { command: "node", args: ["server.js"] }, }), }); expect(res.status).toBe(200); - const data = await res.json(); - expect(data.success).toBe(true); - expect(data.serverId).toBe("server-1"); - expect(data.status).toBe("connected"); - expect(data.message).toContain("Reconnected to server"); - - // Verify disconnect was called before connect - expect(mcpClientManager.disconnectServer).toHaveBeenCalledWith( - "server-1", - ); - expect(mcpClientManager.connectToServer).toHaveBeenCalledWith( - "server-1", - { command: "node", args: ["server.js"] }, - ); + const callArgs = mcpClientManager.connectToServer.mock.calls[0]; + expect(callArgs[1]).toMatchObject({ + command: "node", + args: ["server.js"], + }); }); + }); + + describe("success cases", () => { + it("reconnects successfully with STDIO config from Convex", async () => { + mockBatchAuthorize({ + results: { + [RECONNECT_SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: 
"project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "node", + args: ["server.js"], + env: {}, + }, + }, + }, + }); - it("passes URL string as-is", async () => { const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: reconnectAuthHeaders(), body: JSON.stringify({ - serverId: "http-server", - serverConfig: { url: "http://localhost:3000/mcp" }, + projectId: RECONNECT_PROJECT_ID, + serverId: RECONNECT_SERVER_ID, + serverName: RECONNECT_SERVER_NAME, }), }); expect(res.status).toBe(200); - const callArgs = mcpClientManager.connectToServer.mock.calls[0][1]; - expect(callArgs.url).toBe("http://localhost:3000/mcp"); + const data = (await res.json()) as { + success: boolean; + serverId: string; + status: string; + message: string; + }; + expect(data.success).toBe(true); + // Response echoes the display name — that's what the rest of the local + // API surface uses as the manager key. 
+ expect(data.serverId).toBe(RECONNECT_SERVER_NAME); + expect(data.status).toBe("connected"); + expect(data.message).toContain("Reconnected to server"); + + expect(mcpClientManager.disconnectServer).toHaveBeenCalledWith( + RECONNECT_SERVER_NAME + ); + const callArgs = mcpClientManager.connectToServer.mock.calls[0]; + expect(callArgs[0]).toBe(RECONNECT_SERVER_NAME); + expect(callArgs[1]).toMatchObject({ + command: "node", + args: ["server.js"], + }); }); - it("normalizes URL object with href property to string", async () => { + it("reconnects successfully with HTTP config from Convex", async () => { + mockBatchAuthorize({ + results: { + "http-server": { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "http", + url: "http://localhost:3000/mcp", + headers: {}, + }, + }, + }, + }); + const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: reconnectAuthHeaders(), body: JSON.stringify({ + projectId: RECONNECT_PROJECT_ID, serverId: "http-server", - serverConfig: { url: { href: "http://localhost:4000/api" } }, + serverName: "http-server-display", }), }); expect(res.status).toBe(200); - const callArgs = mcpClientManager.connectToServer.mock.calls[0][1]; - expect(callArgs.url).toBe("http://localhost:4000/api"); + const callArgs = mcpClientManager.connectToServer.mock.calls[0]; + expect(callArgs[0]).toBe("http-server-display"); + // Resolver wraps the URL string in a URL object to match the legacy + // connect path's shape (`new URL(...)`), so assert against `.href`. 
+ expect(callArgs[1].url).toBeInstanceOf(URL); + expect(callArgs[1].url.href).toBe("http://localhost:3000/mcp"); }); }); describe("reconnection failures", () => { - it("returns error when reconnection fails to establish connection", async () => { + it("returns success:false when reconnection fails to establish connection", async () => { + mockBatchAuthorize({ + results: { + [RECONNECT_SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "node", + args: [], + env: {}, + }, + }, + }, + }); mcpClientManager.getConnectionStatus.mockReturnValue("failed"); const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: reconnectAuthHeaders(), body: JSON.stringify({ - serverId: "server-1", - serverConfig: { command: "node" }, + projectId: RECONNECT_PROJECT_ID, + serverId: RECONNECT_SERVER_ID, + serverName: RECONNECT_SERVER_NAME, }), }); expect(res.status).toBe(200); // Route returns 200 but success: false - const data = await res.json(); + const data = (await res.json()) as { + success: boolean; + status: string; + error?: string; + }; expect(data.success).toBe(false); expect(data.status).toBe("failed"); expect(data.error).toContain("failed"); }); it("returns 500 when connectToServer throws", async () => { + mockBatchAuthorize({ + results: { + [RECONNECT_SERVER_ID]: { + ok: true, + role: "owner", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "node", + args: [], + env: {}, + }, + }, + }, + }); mcpClientManager.connectToServer.mockRejectedValue( - new Error("Connection refused"), + new Error("Connection refused") ); const res = await app.request("/api/mcp/servers/reconnect", { method: "POST", - headers: { "Content-Type": "application/json" }, + headers: reconnectAuthHeaders(), body: JSON.stringify({ - serverId: 
"server-1", - serverConfig: { command: "node" }, + projectId: RECONNECT_PROJECT_ID, + serverId: RECONNECT_SERVER_ID, + serverName: RECONNECT_SERVER_NAME, }), }); expect(res.status).toBe(500); - const data = await res.json(); + const data = (await res.json()) as { success: boolean; error: string }; expect(data.success).toBe(false); expect(data.error).toBe("Connection refused"); }); diff --git a/mcpjam-inspector/server/routes/mcp/connect.ts b/mcpjam-inspector/server/routes/mcp/connect.ts index 46319a27c..6bf4e27d9 100644 --- a/mcpjam-inspector/server/routes/mcp/connect.ts +++ b/mcpjam-inspector/server/routes/mcp/connect.ts @@ -1,34 +1,168 @@ import { Hono } from "hono"; import "../../types/hono"; // Type extensions import { HOSTED_MODE } from "../../config"; +import { logger } from "../../utils/logger"; +import { + parseConnectionDefaults, + readLocalApiBearer, + resolveLocalServerForConnect, +} from "../../utils/local-server-resolver.js"; +import { WebRouteError } from "../web/errors.js"; const connect = new Hono(); connect.post("/", async (c) => { + let body: any; try { - const { serverConfig, serverId } = await c.req.json(); + body = await c.req.json(); + } catch (error) { + return c.json( + { + success: false, + error: "Failed to parse request body", + details: error instanceof Error ? error.message : "Unknown error", + }, + 400, + ); + } + + const serverId = + typeof body?.serverId === "string" ? body.serverId.trim() : ""; + if (!serverId) { + return c.json( + { success: false, error: "serverId is required" }, + 400, + ); + } - if (!serverConfig) { + // New shape: {projectId, serverId}. Server resolves config + tokens from + // Convex via /web/authorize-batch-local. Mirrors hosted PR #2024. + const projectId = + typeof body?.projectId === "string" ? 
body.projectId.trim() : ""; + const useResolverPath = projectId.length > 0; + + const mcpClientManager = c.mcpClientManager; + + if (useResolverPath) { + // The MCP client manager is keyed by display name across the local API + // surface (tools list/execute, status, etc. all pass display names from + // the UI). The Convex `serverId` is used only as the lookup key into + // /web/authorize-batch-local and is never used as a manager key. + const serverDisplayName = + typeof body?.serverName === "string" ? body.serverName.trim() : ""; + if (!serverDisplayName) { + return c.json( + { success: false, error: "serverName is required with projectId" }, + 400, + ); + } + const bearer = readLocalApiBearer(c); + if (!bearer) { + return c.json( + { success: false, error: "Authorization bearer token is required" }, + 401, + ); + } + + let resolved; + try { + resolved = await resolveLocalServerForConnect( + c, + bearer, + projectId, + serverId, + { + serverDisplayName, + clientCapabilities: + typeof body?.clientCapabilities === "object" && + body.clientCapabilities !== null + ? body.clientCapabilities + : undefined, + defaults: parseConnectionDefaults(body?.connectionDefaults), + }, + ); + } catch (error) { + if (error instanceof WebRouteError) { + // OAuth-required 401s aren't a session-auth failure — the actor is + // signed in, the server just needs the user to complete its OAuth + // flow. Tag the response so authFetch doesn't waste a guest-session + // refresh round-trip that would inevitably hit the same 401. + if (error.details?.oauthRequired === true) { + c.header("X-MCP-Auth-Required", "oauth"); + } + return c.json( + { + success: false, + error: error.message, + ...(error.details ?? {}), + }, + error.status as any, + ); + } return c.json( { success: false, - error: "serverConfig is required", + error: "Failed to resolve server config", + details: error instanceof Error ? 
error.message : "Unknown error", }, - 400, + 500, ); } - if (!serverId) { + try { + // First connect can have nothing to disconnect; treat that as non-fatal + // so the path doesn't 500 before connectToServer runs. Mirrors the + // /servers/reconnect handler's tolerance. + try { + await mcpClientManager.disconnectServer(serverDisplayName); + } catch (disconnectError) { + logger.debug("Failed to disconnect MCP server before connect", { + serverId: serverDisplayName, + error: + disconnectError instanceof Error + ? disconnectError.message + : String(disconnectError), + }); + } + await mcpClientManager.connectToServer(serverDisplayName, resolved.config); + return c.json({ success: true, status: "connected" }); + } catch (error) { + try { + await mcpClientManager.removeServer(serverDisplayName); + } catch (cleanupError) { + logger.debug("Failed to remove MCP server after connection failure", { + serverId: serverDisplayName, + error: + cleanupError instanceof Error + ? cleanupError.message + : String(cleanupError), + }); + } return c.json( { success: false, - error: "serverId is required", + error: `Connection failed for server ${serverDisplayName}: ${error instanceof Error ? error.message : "Unknown error"}`, + details: error instanceof Error ? error.message : "Unknown error", }, - 400, + 500, ); } + } + + // Legacy shape: {serverConfig, serverId}. Retained during the Phase 5–7 + // client migration so existing inspector builds keep working until clients + // are flipped to the new {projectId, serverId} shape. To remove: drop this + // branch and update connect.test.ts to remove the legacy assertions. 
+ const { serverConfig } = body; + if (!serverConfig) { + return c.json( + { success: false, error: "serverConfig is required" }, + 400, + ); + } - if (serverConfig.url) { + if (serverConfig.url) { + try { if (typeof serverConfig.url === "string") { serverConfig.url = new URL(serverConfig.url); } else if ( @@ -37,69 +171,69 @@ connect.post("/", async (c) => { ) { serverConfig.url = new URL(serverConfig.url.href); } + } catch { + return c.json( + { success: false, error: "Invalid server URL" }, + 400, + ); } + } + + // Block STDIO connections in hosted mode (security: prevents RCE) + if (HOSTED_MODE && serverConfig.command) { + return c.json( + { success: false, error: "STDIO transport is disabled in the web app" }, + 403, + ); + } - // Block STDIO connections in hosted mode (security: prevents RCE) - if (HOSTED_MODE && serverConfig.command) { + // Enforce HTTPS in hosted mode + if (HOSTED_MODE && serverConfig.url) { + if (serverConfig.url.protocol !== "https:") { return c.json( { success: false, - error: "STDIO transport is disabled in the web app", + error: + "HTTPS is required in the web app. Please use an https:// URL.", }, - 403, + 400, ); } + } - // Enforce HTTPS in hosted mode - if (HOSTED_MODE && serverConfig.url) { - if (serverConfig.url.protocol !== "https:") { - return c.json( - { - success: false, - error: - "HTTPS is required in the web app. Please use an https:// URL.", - }, - 400, - ); - } - } - - const mcpClientManager = c.mcpClientManager; + try { try { - // Disconnect first if already connected to avoid "already connected" errors await mcpClientManager.disconnectServer(serverId); - await mcpClientManager.connectToServer(serverId, serverConfig); - return c.json({ - success: true, - status: "connected", + } catch (disconnectError) { + logger.debug("Failed to disconnect MCP server before connect", { + serverId, + error: + disconnectError instanceof Error + ? 
disconnectError.message + : String(disconnectError), }); - } catch (error) { - try { - await mcpClientManager.removeServer(serverId); - } catch (cleanupError) { - console.debug( - `Failed to remove MCP server ${serverId} after connection failure`, - cleanupError, - ); - } - - return c.json( - { - success: false, - error: `Connection failed for server ${serverId}: ${error instanceof Error ? error.message : "Unknown error"}`, - details: error instanceof Error ? error.message : "Unknown error", - }, - 500, - ); } + await mcpClientManager.connectToServer(serverId, serverConfig); + return c.json({ success: true, status: "connected" }); } catch (error) { + try { + await mcpClientManager.removeServer(serverId); + } catch (cleanupError) { + logger.debug("Failed to remove MCP server after connection failure", { + serverId, + error: + cleanupError instanceof Error + ? cleanupError.message + : String(cleanupError), + }); + } return c.json( { success: false, - error: "Failed to parse request body", + error: `Connection failed for server ${serverId}: ${error instanceof Error ? error.message : "Unknown error"}`, details: error instanceof Error ? 
error.message : "Unknown error", }, - 400, + 500, ); } }); diff --git a/mcpjam-inspector/server/routes/mcp/servers.ts b/mcpjam-inspector/server/routes/mcp/servers.ts index a1245b3c9..da4ca2745 100644 --- a/mcpjam-inspector/server/routes/mcp/servers.ts +++ b/mcpjam-inspector/server/routes/mcp/servers.ts @@ -1,9 +1,14 @@ import { Hono } from "hono"; -import type { MCPServerConfig } from "@mcpjam/sdk"; import "../../types/hono"; // Type extensions +import { HOSTED_MODE } from "../../config"; import { rpcLogBus, type RpcLogEvent } from "../../services/rpc-log-bus"; import { logger } from "../../utils/logger"; -import { HOSTED_MODE } from "../../config"; +import { + parseConnectionDefaults, + readLocalApiBearer, + resolveLocalServerForConnect, +} from "../../utils/local-server-resolver.js"; +import { WebRouteError } from "../web/errors.js"; const servers = new Hono(); @@ -114,10 +119,10 @@ servers.delete("/:serverId", async (c) => { } } catch (error) { // Ignore disconnect errors for already disconnected servers - console.debug( - `Failed to disconnect MCP server ${serverId} during removal`, - error, - ); + logger.debug("Failed to disconnect MCP server during removal", { + serverId, + error: error instanceof Error ? error.message : String(error), + }); } mcpClientManager.removeServer(serverId); @@ -138,39 +143,123 @@ servers.delete("/:serverId", async (c) => { } }); -// Reconnect to a server +// Reconnect to a server. New shape sends {projectId, serverId}; the local +// Hono server resolves the config from Convex. Legacy {serverConfig} bodies +// remain accepted during the Phase 5–7 client migration so existing inspector +// builds keep working. 
servers.post("/reconnect", async (c) => { - let serverId: string | undefined; + let body: any; try { - const body = (await c.req.json()) as { - serverId?: string; - serverConfig?: MCPServerConfig; - }; + body = await c.req.json(); + } catch (error) { + return c.json( + { + success: false, + error: "Failed to parse request body", + details: error instanceof Error ? error.message : "Unknown error", + }, + 400, + ); + } + + const serverId = + typeof body?.serverId === "string" ? body.serverId.trim() : ""; + if (!serverId) { + return c.json( + { success: false, error: "serverId is required" }, + 400, + ); + } + + const projectId = + typeof body?.projectId === "string" ? body.projectId.trim() : ""; + const useResolverPath = projectId.length > 0; + const mcpClientManager = c.mcpClientManager; - serverId = body.serverId; - const serverConfig = body.serverConfig; + let normalizedConfig: import("@mcpjam/sdk").MCPServerConfig | null = null; + // The MCP client manager is keyed by display name. In the resolver path + // `serverId` carries the Convex document id (used only for the lookup); + // in the legacy path it already is the display name. + let managerKey = serverId; - if (!serverId || !serverConfig) { + if (useResolverPath) { + const serverDisplayName = + typeof body?.serverName === "string" ? body.serverName.trim() : ""; + if (!serverDisplayName) { + return c.json( + { success: false, error: "serverName is required with projectId" }, + 400, + ); + } + const bearer = readLocalApiBearer(c); + if (!bearer) { + return c.json( + { success: false, error: "Authorization bearer token is required" }, + 401, + ); + } + + try { + const resolved = await resolveLocalServerForConnect( + c, + bearer, + projectId, + serverId, + { + serverDisplayName, + clientCapabilities: + typeof body?.clientCapabilities === "object" && + body.clientCapabilities !== null + ? 
body.clientCapabilities + : undefined, + defaults: parseConnectionDefaults(body?.connectionDefaults), + }, + ); + normalizedConfig = resolved.config; + managerKey = serverDisplayName; + } catch (error) { + if (error instanceof WebRouteError) { + // See connect.ts — same rationale: an OAuth-required 401 means the + // server demands the user complete its OAuth flow, not that the + // session bearer is invalid. Skip the guest-refresh retry. + if (error.details?.oauthRequired === true) { + c.header("X-MCP-Auth-Required", "oauth"); + } + return c.json( + { + success: false, + error: error.message, + ...(error.details ?? {}), + }, + error.status as any, + ); + } + logger.error("Error resolving server config for reconnect", error, { + serverId, + }); return c.json( { success: false, - error: "serverId and serverConfig are required", + error: error instanceof Error ? error.message : "Unknown error", }, + 500, + ); + } + } else { + // Legacy path during transition. + const serverConfig = body?.serverConfig; + if (!serverConfig) { + return c.json( + { success: false, error: "serverId and serverConfig are required" }, 400, ); } - const mcpClientManager = c.mcpClientManager; - - const normalizedConfig: MCPServerConfig = { ...serverConfig }; - if ( - "url" in normalizedConfig && - normalizedConfig.url !== undefined && - normalizedConfig.url !== null - ) { - const urlValue = normalizedConfig.url as unknown; + const cfg: any = { ...serverConfig }; + if ("url" in cfg && cfg.url !== undefined && cfg.url !== null) { + const urlValue = cfg.url as unknown; if (typeof urlValue === "string") { - normalizedConfig.url = urlValue; + cfg.url = urlValue; } else if (urlValue instanceof URL) { // already normalized } else if ( @@ -179,26 +268,32 @@ servers.post("/reconnect", async (c) => { "href" in (urlValue as Record) && typeof (urlValue as { href?: unknown }).href === "string" ) { - normalizedConfig.url = new URL( - (urlValue as { href: string }).href, - ).toString(); + cfg.url = new 
URL((urlValue as { href: string }).href).toString(); } } - // Block STDIO connections in hosted mode - if (HOSTED_MODE && normalizedConfig.command) { + // Defense-in-depth: mirror the connect.ts legacy guards. The /api/mcp/* + // middleware in app.ts blocks hosted traffic to this endpoint with 410, + // but keep these checks aligned across the two routes so a future + // middleware change can't open a hole on one and not the other. + if (HOSTED_MODE && cfg.command) { return c.json( - { - success: false, - error: "STDIO transport is disabled in the web app", - }, + { success: false, error: "STDIO transport is disabled in the web app" }, 403, ); } - - // Enforce HTTPS in hosted mode - if (HOSTED_MODE && normalizedConfig.url) { - if (new URL(normalizedConfig.url).protocol !== "https:") { + if (HOSTED_MODE && cfg.url) { + let urlForCheck: URL; + try { + urlForCheck = + cfg.url instanceof URL ? cfg.url : new URL(String(cfg.url)); + } catch { + return c.json( + { success: false, error: "Invalid server URL" }, + 400, + ); + } + if (urlForCheck.protocol !== "https:") { return c.json( { success: false, @@ -210,25 +305,44 @@ servers.post("/reconnect", async (c) => { } } - await mcpClientManager.disconnectServer(serverId); - await mcpClientManager.connectToServer(serverId, normalizedConfig); + normalizedConfig = cfg; + } - const status = mcpClientManager.getConnectionStatus(serverId); + try { + // A stale or already-disconnected entry shouldn't fail reconnect; the + // DELETE handler in this file uses the same tolerance. + try { + await mcpClientManager.disconnectServer(managerKey); + } catch (disconnectError) { + logger.debug("Failed to disconnect MCP server before reconnect", { + serverId: managerKey, + error: + disconnectError instanceof Error + ? 
disconnectError.message + : String(disconnectError), + }); + } + await mcpClientManager.connectToServer( + managerKey, + normalizedConfig as import("@mcpjam/sdk").MCPServerConfig, + ); + + const status = mcpClientManager.getConnectionStatus(managerKey); const message = status === "connected" - ? `Reconnected to server: ${serverId}` - : `Server ${serverId} reconnected with status '${status}'`; + ? `Reconnected to server: ${managerKey}` + : `Server ${managerKey} reconnected with status '${status}'`; const success = status === "connected"; return c.json({ success, - serverId, + serverId: managerKey, status, message, ...(success ? {} : { error: message }), }); } catch (error) { - logger.error("Error reconnecting server", error, { serverId }); + logger.error("Error reconnecting server", error, { serverId: managerKey }); return c.json( { success: false, diff --git a/mcpjam-inspector/server/routes/web/__tests__/authorize-batch.test.ts b/mcpjam-inspector/server/routes/web/__tests__/authorize-batch.test.ts index b7f419bac..88bb59cd2 100644 --- a/mcpjam-inspector/server/routes/web/__tests__/authorize-batch.test.ts +++ b/mcpjam-inspector/server/routes/web/__tests__/authorize-batch.test.ts @@ -183,6 +183,68 @@ describe("authorizeBatch — request log context attribution", () => { }); }); + it("strips command/args/env from hosted serverConfig even if backend regresses", async () => { + // Defense-in-depth for hosted: the Convex /web/authorize-batch endpoint + // is contractually HTTP-only (normalizeAuthorizeResult drops STDIO + // fields). If a backend regression ever lets command/args/env through, + // the wrapper must drop them before they can reach the hosted client. + // Local mode uses /web/authorize-batch-local instead, which is allowed + // to carry STDIO fields and goes through local-server-resolver.ts. 
+ vi.stubGlobal( + "fetch", + vi.fn().mockResolvedValue( + mockBatchResponse({ + results: { + "srv-stdio-leak": { + ok: true, + role: "member", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "stdio", + command: "node", + args: ["server.js"], + env: { OPENAI_API_KEY: "sk-secret" }, + }, + internalLogContext: { ...projectCtx, serverId: "srv-stdio-leak" }, + }, + "srv-http-with-leak": { + ok: true, + role: "member", + accessLevel: "project_member", + permissions: { chatOnly: false }, + serverConfig: { + transportType: "http", + url: "https://example.com/mcp", + command: "should-not-be-here", + env: { LEAKED: "1" }, + }, + internalLogContext: { ...projectCtx, serverId: "srv-http-with-leak" }, + }, + }, + }), + ), + ); + + const { c } = makeContext(); + const result = await authorizeBatch(c, "bearer", "ws-1", [ + "srv-stdio-leak", + "srv-http-with-leak", + ]); + + for (const entry of Object.values(result.results)) { + if (!entry.ok) continue; + const cfg = entry.serverConfig as Record; + expect(cfg.command).toBeUndefined(); + expect(cfg.args).toBeUndefined(); + expect(cfg.env).toBeUndefined(); + } + // Sanity: HTTP fields that *are* allowed survive the strip. 
+ const httpEntry = result.results["srv-http-with-leak"]; + if (!httpEntry.ok) throw new Error("expected ok result"); + expect(httpEntry.serverConfig.url).toBe("https://example.com/mcp"); + }); + it("strips internalLogContext from every successful result returned to caller", async () => { vi.stubGlobal( "fetch", diff --git a/mcpjam-inspector/server/routes/web/auth.ts b/mcpjam-inspector/server/routes/web/auth.ts index df21b6597..ac0dd13fc 100644 --- a/mcpjam-inspector/server/routes/web/auth.ts +++ b/mcpjam-inspector/server/routes/web/auth.ts @@ -9,7 +9,10 @@ import { } from "./hosted-rpc-logs.js"; import { INSPECTOR_MCP_RETRY_POLICY } from "../../utils/mcp-retry-policy.js"; import { setRequestLogContext } from "../../utils/request-logger.js"; -import type { RequestLogContext } from "../../utils/log-events.js"; +import { + type InternalLogContext, + mapInternalToRequestContext, +} from "../../utils/internal-log-context.js"; import { ErrorCode, WebRouteError, @@ -148,57 +151,6 @@ function buildServerNamesById( // ── Authorization ──────────────────────────────────────────────────── -// Server-only logging context returned by backend. Optional during rollout. -// When present, it must never be forwarded to the browser. 
-type InternalLogContext = { - authType: "signedIn" | "guest"; - userId?: string | null; - userExternalId?: string | null; - guestExternalId?: string | null; - emailDomain?: string | null; - orgId?: string | null; - orgPlan?: string | null; - orgSeatQuantity?: number | null; - orgCreatedBy?: string | null; - projectId?: string | null; - projectRole?: - | "owner" - | "admin" - | "member" - | "guest" - | "editor" - | "chat" - | null; - accessLevel?: "project_member" | "shared_chat" | null; - serverId?: string | null; - serverTransport?: "stdio" | "http" | null; - chatboxId?: string | null; - surface?: "preview" | "share_link" | null; -}; - -function mapInternalToRequestContext( - ctx: InternalLogContext -): Partial { - return { - authType: ctx.authType, - userId: ctx.userId ?? null, - userExternalId: ctx.userExternalId ?? null, - guestExternalId: ctx.guestExternalId ?? null, - emailDomain: ctx.emailDomain ?? null, - orgId: ctx.orgId ?? null, - orgPlan: ctx.orgPlan ?? null, - orgSeatQuantity: ctx.orgSeatQuantity ?? null, - orgCreatedBy: ctx.orgCreatedBy ?? null, - projectId: ctx.projectId ?? null, - projectRole: ctx.projectRole ?? null, - accessLevel: ctx.accessLevel ?? null, - serverId: ctx.serverId ?? null, - serverTransport: ctx.serverTransport ?? null, - chatboxId: ctx.chatboxId ?? null, - surface: ctx.surface ?? null, - }; -} - export type ConvexAuthorizeResponse = { authorized: boolean; role: "owner" | "admin" | "member"; @@ -255,6 +207,30 @@ export type ConvexBatchAuthorizeResponse = { results: Record; }; +// Defense-in-depth: hosted /web/authorize-batch is contractually meant to +// return an HTTP-only `serverConfig` — Convex strips command/args/env via +// `normalizeAuthorizeResult`. If a backend regression ever lets those fields +// through, we drop them here so they can never reach the hosted client or be +// fed into a transport. 
Local mode uses /web/authorize-batch-local instead, +// which is allowed to carry STDIO fields and goes through a different code +// path (`local-server-resolver.ts`). +const STDIO_ONLY_FIELDS = ["command", "args", "env"] as const; +function stripStdioFieldsFromHostedConfig< + T extends { serverConfig?: Record }, +>(holder: T): T { + const cfg = holder.serverConfig; + if (!cfg || typeof cfg !== "object") return holder; + let cleaned: Record | undefined; + for (const field of STDIO_ONLY_FIELDS) { + if (field in cfg) { + if (!cleaned) cleaned = { ...cfg }; + delete cleaned[field]; + } + } + if (!cleaned) return holder; + return { ...holder, serverConfig: cleaned }; +} + export async function authorizeServer( c: Context, bearerToken: string, @@ -341,7 +317,7 @@ export async function authorizeServer( if (internalLogContext) { setRequestLogContext(c, mapInternalToRequestContext(internalLogContext)); } - return clientSafe; + return stripStdioFieldsFromHostedConfig(clientSafe) as ClientSafeAuthorizeResponse; } export async function authorizeBatch( @@ -460,7 +436,9 @@ export async function authorizeBatch( for (const [serverId, result] of Object.entries(raw.results)) { if (result.ok) { const { internalLogContext: _omit, ...clientSafeResult } = result; - strippedResults[serverId] = clientSafeResult; + strippedResults[serverId] = stripStdioFieldsFromHostedConfig( + clientSafeResult, + ) as ConvexBatchAuthorizeSuccess; } else { strippedResults[serverId] = result; } diff --git a/mcpjam-inspector/server/utils/internal-log-context.ts b/mcpjam-inspector/server/utils/internal-log-context.ts new file mode 100644 index 000000000..cf646667a --- /dev/null +++ b/mcpjam-inspector/server/utils/internal-log-context.ts @@ -0,0 +1,68 @@ +import type { RequestLogContext } from "./log-events.js"; + +/** + * Server-only logging context returned by Convex authorize endpoints + * (`/web/authorize-batch` and `/web/authorize-batch-local`). 
Optional during + * rollout — every consumer must tolerate missing fields. + * + * NEVER forward this to the browser. Both producers (`routes/web/auth.ts` + * for hosted, `utils/local-server-resolver.ts` for local) strip it before + * the response leaves the server. Centralizing the shape here so a new + * field added on the Convex side only needs to be plumbed in one place. + */ +export type InternalLogContext = { + authType?: "signedIn" | "guest"; + userId?: string | null; + userExternalId?: string | null; + guestExternalId?: string | null; + emailDomain?: string | null; + orgId?: string | null; + orgPlan?: string | null; + orgSeatQuantity?: number | null; + orgCreatedBy?: string | null; + projectId?: string | null; + projectRole?: + | "owner" + | "admin" + | "member" + | "guest" + | "editor" + | "chat" + | null; + accessLevel?: "project_member" | "shared_chat" | null; + serverId?: string | null; + serverTransport?: "stdio" | "http" | null; + chatboxId?: string | null; + surface?: "preview" | "share_link" | null; +}; + +/** + * Lift an `InternalLogContext` into the partial shape `setRequestLogContext` + * accepts. Every field defaults to `null` when missing so the request log + * line has a stable shape. + */ +export function mapInternalToRequestContext( + ctx: InternalLogContext, +): Partial { + return { + // `RequestLogContext.authType` is required-non-null. Omit the field when + // the upstream payload didn't include one rather than poking `null` into + // a slot that's typed `AuthType`. + ...(ctx.authType ? { authType: ctx.authType } : {}), + userId: ctx.userId ?? null, + userExternalId: ctx.userExternalId ?? null, + guestExternalId: ctx.guestExternalId ?? null, + emailDomain: ctx.emailDomain ?? null, + orgId: ctx.orgId ?? null, + orgPlan: ctx.orgPlan ?? null, + orgSeatQuantity: ctx.orgSeatQuantity ?? null, + orgCreatedBy: ctx.orgCreatedBy ?? null, + projectId: ctx.projectId ?? null, + projectRole: ctx.projectRole ?? null, + accessLevel: ctx.accessLevel ?? 
null, + serverId: ctx.serverId ?? null, + serverTransport: ctx.serverTransport ?? null, + chatboxId: ctx.chatboxId ?? null, + surface: ctx.surface ?? null, + }; +} diff --git a/mcpjam-inspector/server/utils/local-server-resolver.ts b/mcpjam-inspector/server/utils/local-server-resolver.ts new file mode 100644 index 000000000..a9b409041 --- /dev/null +++ b/mcpjam-inspector/server/utils/local-server-resolver.ts @@ -0,0 +1,395 @@ +import type { Context } from "hono"; +import type { MCPServerConfig } from "@mcpjam/sdk"; +import { + ErrorCode, + WebRouteError, + parseErrorMessage, +} from "../routes/web/errors.js"; +import { setRequestLogContext } from "./request-logger.js"; +import { + type InternalLogContext, + mapInternalToRequestContext, +} from "./internal-log-context.js"; +import type { ConnectionDefaults } from "../../shared/connection-defaults.js"; + +type LocalAuthorizeServerConfig = + | { + transportType: "http"; + url: string; + headers: Record; + timeout?: number; + clientCapabilities?: unknown; + useOAuth?: boolean; + oauthScopes?: string[]; + clientId?: string; + oauthResourceUrl?: string; + } + | { + transportType: "stdio"; + command: string; + args: string[]; + env: Record; + timeout?: number; + clientCapabilities?: unknown; + }; + +type LocalAuthorizeBatchSuccess = { + ok: true; + role: string; + accessLevel: string; + permissions: { chatOnly: boolean }; + serverConfig: LocalAuthorizeServerConfig; + oauthAccessToken?: string | null; + internalLogContext?: InternalLogContext; +}; + +type LocalAuthorizeBatchFailure = { + ok: false; + status: number; + code: string; + message: string; +}; + +export type LocalAuthorizeBatchResult = + | LocalAuthorizeBatchSuccess + | LocalAuthorizeBatchFailure; + +export type LocalAuthorizeBatchResponse = { + results: Record; +}; + +/** + * Read the WorkOS or guest bearer the client attached to a /api/mcp/* request. 
+ * Local mode runs both `X-MCP-Session-Auth` (loopback secret, checked by + * sessionAuthMiddleware) AND `Authorization: Bearer ...` (Convex actor identity). + * The session token alone gates access to the local API process; Convex itself + * still enforces project ownership via the bearer. + */ +export function readLocalApiBearer(c: Context): string | null { + const raw = c.req.header("Authorization") ?? c.req.header("authorization"); + if (!raw) return null; + const trimmed = raw.trim(); + if (!trimmed.toLowerCase().startsWith("bearer ")) return null; + const token = trimmed.slice(7).trim(); + return token.length > 0 ? token : null; +} + +/** + * Call Convex `/web/authorize-batch-local` with the user's bearer. + * Returns the full server config for each requested serverId, including + * STDIO command/args/env. Hosted-only fields (share/chatbox tokens) are not + * accepted by this endpoint by design. + */ +export async function authorizeBatchLocal( + c: Context, + bearerToken: string, + projectId: string, + serverIds: string[] +): Promise { + const convexUrl = process.env.CONVEX_HTTP_URL; + if (!convexUrl) { + throw new WebRouteError( + 500, + ErrorCode.INTERNAL_ERROR, + "Server missing CONVEX_HTTP_URL configuration" + ); + } + + // Cap the call so a hung Convex instance can't tie up the inspector worker + // for the inbound /api/mcp/connect or /api/mcp/servers/reconnect request. + // 10s is well above the 99p of the underlying authorize* query and below + // most browser/proxy idle timeouts. 
+ const AUTHORIZE_BATCH_LOCAL_TIMEOUT_MS = 10_000; + const controller = new AbortController(); + const timeoutId = setTimeout( + () => controller.abort(), + AUTHORIZE_BATCH_LOCAL_TIMEOUT_MS, + ); + + let response: Response; + try { + response = await fetch(`${convexUrl}/web/authorize-batch-local`, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${bearerToken}`, + }, + body: JSON.stringify({ projectId, serverIds }), + signal: controller.signal, + }); + } catch (error) { + const isAbort = + error instanceof Error && + (error.name === "AbortError" || (error as { code?: string }).code === "ABORT_ERR"); + throw new WebRouteError( + isAbort ? 504 : 502, + ErrorCode.SERVER_UNREACHABLE, + isAbort + ? `Authorization service timed out after ${AUTHORIZE_BATCH_LOCAL_TIMEOUT_MS}ms` + : `Failed to reach authorization service: ${parseErrorMessage(error)}` + ); + } finally { + clearTimeout(timeoutId); + } + + let body: any = null; + try { + body = await response.json(); + } catch { + // ignored + } + + if (!response.ok) { + const code = + typeof body?.code === "string" ? body.code : ErrorCode.INTERNAL_ERROR; + const message = + typeof body?.message === "string" + ? 
body.message + : `Authorization failed (${response.status})`; + throw new WebRouteError(response.status, code as ErrorCode, message); + } + + if (!body?.results || typeof body.results !== "object") { + throw new WebRouteError( + 500, + ErrorCode.INTERNAL_ERROR, + "Authorization response is missing batch results" + ); + } + + const raw = body as LocalAuthorizeBatchResponse; + const successful = Object.entries(raw.results).filter( + (entry): entry is [string, LocalAuthorizeBatchSuccess] => entry[1].ok + ); + const sourceCtx = successful.find(([, r]) => r.internalLogContext)?.[1] + .internalLogContext; + if (sourceCtx) { + const partial = mapInternalToRequestContext(sourceCtx); + if (successful.length > 1) { + // Multi-server batch: a single per-server identifier on the request log + // line would be misleading. Null them out so the line aggregates over + // the batch instead. + partial.serverId = null; + partial.serverTransport = null; + partial.chatboxId = null; + } + setRequestLogContext(c, partial); + } + + // Strip internalLogContext from results so it never leaks downstream. + const stripped: Record = {}; + for (const [serverId, result] of Object.entries(raw.results)) { + if (result.ok) { + const { internalLogContext: _omit, ...clean } = result; + stripped[serverId] = clean; + } else { + stripped[serverId] = result; + } + } + return { results: stripped }; +} + +/** + * Convenience wrapper: authorize a single server and return the {success: true} + * payload directly. Throws WebRouteError for any non-ok result. 
+ */ +export async function authorizeServerLocal( + c: Context, + bearerToken: string, + projectId: string, + serverId: string +): Promise { + const batch = await authorizeBatchLocal(c, bearerToken, projectId, [ + serverId, + ]); + const result = batch.results[serverId]; + if (!result) { + throw new WebRouteError( + 500, + ErrorCode.INTERNAL_ERROR, + `Authorization response is missing result for server "${serverId}"` + ); + } + if (!result.ok) { + throw new WebRouteError( + result.status, + result.code as ErrorCode, + result.message + ); + } + return result; +} + +// Header precedence (lowest → highest) when the resolver merges these +// onto Convex-stored server config: Convex-stored server headers, project +// default headers from `defaults.headers`, OAuth `Authorization` (if the +// server uses OAuth), so OAuth always wins. + +/** + * Validate `connectionDefaults` from an /api/mcp/* request body. Returns + * `undefined` for missing/non-object input so the caller can fall back to + * Convex-stored values. Drops any fields that aren't of the expected shape + * rather than rejecting the whole request — defaults are advisory. + */ +export function parseConnectionDefaults( + raw: unknown +): ConnectionDefaults | undefined { + if (!raw || typeof raw !== "object") return undefined; + const input = raw as Record; + const out: ConnectionDefaults = {}; + + if (input.headers && typeof input.headers === "object") { + const headers: Record = {}; + for (const [k, v] of Object.entries(input.headers as Record)) { + if (typeof v === "string") headers[k] = v; + } + if (Object.keys(headers).length > 0) out.headers = headers; + } + + if (typeof input.timeoutMs === "number" && Number.isFinite(input.timeoutMs)) { + out.timeoutMs = input.timeoutMs; + } + + if ( + input.clientCapabilities && + typeof input.clientCapabilities === "object" + ) { + out.clientCapabilities = input.clientCapabilities as Record; + } + + return Object.keys(out).length > 0 ? 
out : undefined; +} + +/** + * Build an `MCPServerConfig` (the SDK's union of stdio + http configs) from a + * local authorize result. Distinct from hosted's `toHttpConfig` which strips + * STDIO and rejects non-HTTPS — this is the unstripped local-mode equivalent. + */ +export function toMCPServerConfig( + authResult: LocalAuthorizeBatchSuccess, + options?: { + timeoutMs?: number; + oauthAccessToken?: string; + clientCapabilities?: Record; + defaultHeaders?: Record; + } +): MCPServerConfig { + const { serverConfig } = authResult; + const timeout = options?.timeoutMs ?? serverConfig.timeout; + const clientCapabilities = + options?.clientCapabilities ?? + (serverConfig.clientCapabilities as Record | undefined); + + if (serverConfig.transportType === "stdio") { + const stdio: any = { + command: serverConfig.command, + args: serverConfig.args ?? [], + env: serverConfig.env ?? {}, + }; + if (typeof timeout === "number") stdio.timeout = timeout; + if (clientCapabilities) { + stdio.capabilities = clientCapabilities; + stdio.clientCapabilities = clientCapabilities; + } + return stdio as MCPServerConfig; + } + + // Header precedence for HTTP servers: Convex-stored server headers form the + // base, project-default headers from the runtime `defaultHeaders` overlay + // them (so org/project policy can add headers without touching individual + // server records), and a bearer for OAuth-using servers always wins because + // it carries the actor identity. + const headers: Record = { + ...(serverConfig.headers ?? {}), + ...(options?.defaultHeaders ?? {}), + }; + const oauthToken = options?.oauthAccessToken ?? 
authResult.oauthAccessToken; + if (oauthToken) { + headers["Authorization"] = `Bearer ${oauthToken}`; + } + + // Match the legacy connect-path shape: legacy `connect.ts` upgrades the + // URL string to a `URL` object before calling `connectToServer`, and any + // downstream code (HOSTED_MODE protocol checks, future SDK / middleware + // logic that does `instanceof URL` or accesses `.protocol`/`.hostname`) + // has been validated against that shape. Passing a string here would be + // silent drift between the two paths. + let url: URL; + try { + url = new URL(serverConfig.url); + } catch { + throw new WebRouteError( + 400, + ErrorCode.VALIDATION_ERROR, + `Server config has an invalid URL: ${serverConfig.url}`, + ); + } + + const http: any = { + url, + requestInit: { headers }, + }; + if (typeof timeout === "number") http.timeout = timeout; + if (clientCapabilities) { + http.capabilities = clientCapabilities; + http.clientCapabilities = clientCapabilities; + } + return http as MCPServerConfig; +} + +/** + * Single-call convenience: authorize a serverId and return both the raw + * response (for OAuth token plumbing) and a ready-to-pass MCPServerConfig. + * Throws WebRouteError for unauthorized / not found / OAuth-required cases. + */ +export async function resolveLocalServerForConnect( + c: Context, + bearerToken: string, + projectId: string, + serverId: string, + options?: { + timeoutMs?: number; + clientCapabilities?: Record; + serverDisplayName?: string; + /** + * Runtime defaults the inspector client computed via + * `withProjectConnectionDefaults` and forwarded explicitly so the + * resolver can reproduce the same MCPServerConfig the legacy + * `{serverConfig}` body would have produced. Without this, project-level + * header/timeout/capability defaults applied client-side are lost on the + * resolver path. 
+ */ + defaults?: ConnectionDefaults; + } +): Promise<{ config: MCPServerConfig; authorizeResult: LocalAuthorizeBatchSuccess }> { + const result = await authorizeServerLocal(c, bearerToken, projectId, serverId); + + const useOAuth = + result.serverConfig.transportType === "http" && + result.serverConfig.useOAuth === true; + if (useOAuth && !result.oauthAccessToken) { + const displayName = options?.serverDisplayName ?? serverId; + throw new WebRouteError( + 401, + ErrorCode.UNAUTHORIZED, + `Server "${displayName}" requires OAuth authentication. Please complete the OAuth flow first.`, + { + oauthRequired: true, + serverId, + serverName: options?.serverDisplayName ?? null, + serverUrl: + result.serverConfig.transportType === "http" + ? result.serverConfig.url + : undefined, + } + ); + } + + const config = toMCPServerConfig(result, { + timeoutMs: options?.timeoutMs ?? options?.defaults?.timeoutMs, + clientCapabilities: + options?.clientCapabilities ?? options?.defaults?.clientCapabilities, + defaultHeaders: options?.defaults?.headers, + }); + return { config, authorizeResult: result }; +} diff --git a/mcpjam-inspector/shared/connection-defaults.ts b/mcpjam-inspector/shared/connection-defaults.ts new file mode 100644 index 000000000..17cb54985 --- /dev/null +++ b/mcpjam-inspector/shared/connection-defaults.ts @@ -0,0 +1,23 @@ +/** + * Project-level runtime overrides the inspector client computes via + * `withProjectConnectionDefaults` and forwards on `/api/mcp/connect` and + * `/api/mcp/servers/reconnect` requests. The server resolver merges these + * onto the Convex-stored server config so the resolver path produces the + * same MCPServerConfig the legacy `{serverConfig}` body would have. + * + * One source of truth for the wire shape: client encoder + * (`state/mcp-api.ts`) and server decoder (`utils/local-server-resolver.ts`) + * both import from here, so a field added on one side is impossible to + * forget on the other. 
+ */ +export type ConnectionDefaults = { + /** + * Header overlay merged on top of Convex-stored server headers. OAuth's + * `Authorization` header (when present) always wins on the resolver side. + */ + headers?: Record; + /** Per-request timeout in milliseconds. */ + timeoutMs?: number; + /** MCP client capabilities forwarded to the SDK transport. */ + clientCapabilities?: Record; +};