diff --git a/common/config/rush/pnpm-lock.yaml b/common/config/rush/pnpm-lock.yaml index 3be3634e305..78ac9586689 100644 --- a/common/config/rush/pnpm-lock.yaml +++ b/common/config/rush/pnpm-lock.yaml @@ -4080,11 +4080,11 @@ importers: ../../workspaces/mi/mi-extension: dependencies: '@ai-sdk/amazon-bedrock': - specifier: 4.0.83 - version: 4.0.83(zod@4.1.11) + specifier: 4.0.96 + version: 4.0.96(zod@4.1.11) '@ai-sdk/anthropic': - specifier: 3.0.64 - version: 3.0.64(zod@4.1.11) + specifier: 3.0.71 + version: 3.0.71(zod@4.1.11) '@ai-sdk/mcp': specifier: 1.0.29 version: 1.0.29(zod@4.1.11) @@ -4109,6 +4109,9 @@ importers: '@opentelemetry/sdk-node': specifier: 0.210.0 version: 0.210.0(@opentelemetry/api@1.9.1) + '@tavily/core': + specifier: 0.6.4 + version: 0.6.4 '@types/fs-extra': specifier: 11.0.4 version: 11.0.4 @@ -4986,6 +4989,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/amazon-bedrock@4.0.96': + resolution: {integrity: sha512-Mc4Ias2jRMD1jOB6xWtKNPdhECeuCZyIlbr9EAGfBnyBt++sS13ziZh9qv9TdyMCAZJ7xoQcpbchoRJcKwPdpA==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/anthropic@3.0.63': resolution: {integrity: sha512-SiLosFr0FfKfrNpAAj8mD/i3S5YBB/z5orb1DH3pN1yATuBNjjPMLnRE4P3Dn7Y5cQsro0uzw5g5117hkShWoQ==} engines: {node: '>=18'} @@ -4998,6 +5007,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/anthropic@3.0.71': + resolution: {integrity: sha512-bUWOzrzR0gJKJO/PLGMR4uH2dqEgqGhrsCV+sSpk4KtOEnUQlfjZI/F7BFlqSvVpFbjdgYRRLysAeEZpJ6S1lg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/devtools@0.0.6': resolution: {integrity: sha512-ZFzTpAPVpJ1aFr9qIb3Pvg6Yf5RIm60AXiw8zhzhThTJoIzSw2bc2aT5IkZW9ysGMwQopPptPP5Ie8bkdx/xcg==} hasBin: true @@ -5050,6 +5065,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.23': + resolution: {integrity: sha512-z8GlDaCmRSDlqkMF2f4/RFgWxdarvIbyuk+m6WXT1LYgsnGiXRJGTD2Z1+SDl3LqtFuRtGX1aghYvQLoHL/9pg==} + 
engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider@3.0.4': resolution: {integrity: sha512-5KXyBOSEX+l67elrEa+wqo/LSsSTtrPj9Uoh3zMbe/ceQX4ucHI3b9nUEfNkGF3Ry1svv90widAt+aiKdIJasQ==} engines: {node: '>=18'} @@ -10680,6 +10701,9 @@ packages: '@tanstack/virtual-core@3.14.0': resolution: {integrity: sha512-JLANqGy/D6k4Ujmh8Tr25lGimuOXNiaVyXaCAZS0W+1390sADdGnyUdSWNIfd49gebtIxGMij4IktRVzrdr12Q==} + '@tavily/core@0.6.4': + resolution: {integrity: sha512-PppC0p2SwkoImLiYFT/uqDyWKPivpVsIM16HUf1Apmtbqg1YhI7Yg5Hq6eYSojC6COVCGXE4CotBnWqUmrai+A==} + '@testing-library/dom@10.4.0': resolution: {integrity: sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ==} engines: {node: '>=18'} @@ -11910,6 +11934,7 @@ packages: '@xmldom/xmldom@0.8.10': resolution: {integrity: sha512-2WALfTl4xo2SkGCYRt6rDTFfk9R1czmBvUQy12gK2KuRKIpWEhcbbzy8EZXtz/jkRqHX8bFEc6FC1HjX4TUWYw==} engines: {node: '>=10.0.0'} + deprecated: this version has critical issues, please update to the latest version '@xtuc/ieee754@1.2.0': resolution: {integrity: sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==} @@ -18115,6 +18140,9 @@ packages: resolution: {integrity: sha512-Smw4xcfIQ5LVjAOuJCvN/zIodzA/BBSsluuoSykP+lUvScIi4U6RJLfwHet5cxFnCswUjISV8oAXaqaJDY3chg==} engines: {node: '>= 0.8'} + js-tiktoken@1.0.21: + resolution: {integrity: sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==} + js-tokens@3.0.2: resolution: {integrity: sha512-RjTcuD4xjtthQkaWH7dFlH85L+QaVtSoOyGdZ3g6HFhS9dFNDfLyqgm2NFe2X6cQpeFmt0452FJjFG5UameExg==} @@ -25387,25 +25415,25 @@ snapshots: '@adobe/css-tools@4.4.4': {} - '@ai-sdk/amazon-bedrock@4.0.83(zod@4.1.11)': + '@ai-sdk/amazon-bedrock@4.0.83(zod@4.1.8)': dependencies: - '@ai-sdk/anthropic': 3.0.64(zod@4.1.11) + '@ai-sdk/anthropic': 3.0.64(zod@4.1.8) '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.21(zod@4.1.11) + 
'@ai-sdk/provider-utils': 4.0.21(zod@4.1.8) '@smithy/eventstream-codec': 4.2.14 '@smithy/util-utf8': 4.2.2 aws4fetch: 1.0.20 - zod: 4.1.11 + zod: 4.1.8 - '@ai-sdk/amazon-bedrock@4.0.83(zod@4.1.8)': + '@ai-sdk/amazon-bedrock@4.0.96(zod@4.1.11)': dependencies: - '@ai-sdk/anthropic': 3.0.64(zod@4.1.8) + '@ai-sdk/anthropic': 3.0.71(zod@4.1.11) '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.21(zod@4.1.8) + '@ai-sdk/provider-utils': 4.0.23(zod@4.1.11) '@smithy/eventstream-codec': 4.2.14 '@smithy/util-utf8': 4.2.2 aws4fetch: 1.0.20 - zod: 4.1.8 + zod: 4.1.11 '@ai-sdk/anthropic@3.0.63(zod@4.1.8)': dependencies: @@ -25413,18 +25441,18 @@ snapshots: '@ai-sdk/provider-utils': 4.0.21(zod@4.1.8) zod: 4.1.8 - '@ai-sdk/anthropic@3.0.64(zod@4.1.11)': - dependencies: - '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.21(zod@4.1.11) - zod: 4.1.11 - '@ai-sdk/anthropic@3.0.64(zod@4.1.8)': dependencies: '@ai-sdk/provider': 3.0.8 '@ai-sdk/provider-utils': 4.0.21(zod@4.1.8) zod: 4.1.8 + '@ai-sdk/anthropic@3.0.71(zod@4.1.11)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.23(zod@4.1.11) + zod: 4.1.11 + '@ai-sdk/devtools@0.0.6': dependencies: '@ai-sdk/provider': 3.0.4 @@ -25497,6 +25525,13 @@ snapshots: eventsource-parser: 3.0.8 zod: 4.1.8 + '@ai-sdk/provider-utils@4.0.23(zod@4.1.11)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.8 + zod: 4.1.11 + '@ai-sdk/provider@3.0.4': dependencies: json-schema: 0.4.0 @@ -34675,7 +34710,7 @@ snapshots: ts-dedent: 2.2.0 unfetch: 4.2.0 util-deprecate: 1.0.2 - webpack: 5.104.1(webpack-cli@4.10.0) + webpack: 5.104.1(webpack-cli@6.0.1) optionalDependencies: typescript: 5.8.3 @@ -36435,7 +36470,7 @@ snapshots: react-docgen-typescript: 2.4.0(typescript@5.8.3) tslib: 2.8.1 typescript: 5.8.3 - webpack: 5.104.1(webpack-cli@4.10.0) + webpack: 5.104.1(webpack-cli@6.0.1) transitivePeerDependencies: - supports-color @@ -37915,6 +37950,15 @@ snapshots: 
'@tanstack/virtual-core@3.14.0': {} + '@tavily/core@0.6.4': + dependencies: + axios: 1.15.0 + https-proxy-agent: 7.0.6 + js-tiktoken: 1.0.21 + transitivePeerDependencies: + - debug + - supports-color + '@testing-library/dom@10.4.0': dependencies: '@babel/code-frame': 7.29.0 @@ -48651,6 +48695,10 @@ snapshots: js-string-escape@1.0.1: {} + js-tiktoken@1.0.21: + dependencies: + base64-js: 1.5.1 + js-tokens@3.0.2: {} js-tokens@4.0.0: {} diff --git a/workspaces/mi/mi-core/src/rpc-types/agent-mode/index.ts b/workspaces/mi/mi-core/src/rpc-types/agent-mode/index.ts index 72f19c4304f..fae3b59f773 100644 --- a/workspaces/mi/mi-core/src/rpc-types/agent-mode/index.ts +++ b/workspaces/mi/mi-core/src/rpc-types/agent-mode/index.ts @@ -44,6 +44,7 @@ export type { PlanApprovalResponse, // Session management types SessionMetadata, + SessionContextBlocksState, SessionSummary, GroupedSessions, ListSessionsRequest, diff --git a/workspaces/mi/mi-core/src/rpc-types/agent-mode/types.ts b/workspaces/mi/mi-core/src/rpc-types/agent-mode/types.ts index 9bb8c1a197c..15d4edef5df 100644 --- a/workspaces/mi/mi-core/src/rpc-types/agent-mode/types.ts +++ b/workspaces/mi/mi-core/src/rpc-types/agent-mode/types.ts @@ -40,8 +40,6 @@ export interface SendAgentMessageRequest { images?: ImageObject[]; /** Enable Claude thinking mode (reasoning blocks) */ thinking?: boolean; - /** When true, web_search and web_fetch run without per-call approval prompts */ - webAccessPreapproved?: boolean; /** Chat history for context (AI SDK format with tool calls/results) */ // eslint-disable-next-line @typescript-eslint/no-explicit-any chatHistory?: any[]; @@ -207,8 +205,6 @@ export type PlanApprovalKind = | 'enter_plan_mode' | 'exit_plan_mode' | 'exit_plan_mode_without_plan' - | 'web_search' - | 'web_fetch' | 'shell_command' | 'continue_after_limit'; @@ -418,6 +414,33 @@ export interface SessionMetadata { * Used to skip loading unsupported sessions after breaking storage changes. 
*/ sessionVersion?: number; + /** + * Per-block tracking state for the user-prompt session-context blocks. + * Each value is the hash (or, for `modePolicy`, the verbatim mode name) of + * the inputs that produced the most recently injected block. The agent + * re-injects only the blocks whose stored value drifts since last turn + * (branch switch, date rollover, runtime version change, mode switch, + * payloads change, Tavily key add/remove, ...). Persisting this on metadata + * (rather than in-memory) means the check survives extension restarts. + */ + sessionContextBlocks?: SessionContextBlocksState; +} + +/** + * Tracking state for each session-context block. Absent fields mean "block + * has never been injected" (treated as a first injection on the next turn). + */ +export interface SessionContextBlocksState { + /** sha256-16 of env fields (working dir, git, date, OS, MI runtime info, backend) */ + env?: string; + /** sha256-16 of the connector catalog (artifact ids + bundled inbound ids) */ + connectors?: string; + /** sha256-16 of the web-search-availability flag */ + webAvailability?: string; + /** Verbatim mode name (`"ask" | "edit" | "plan"`) — stored as-is so change notices can say "[mode changed from EDIT]" */ + modePolicy?: string; + /** sha256-16 of the canonicalized preconfigured-payloads JSON */ + payloads?: string; } /** diff --git a/workspaces/mi/mi-core/src/rpc-types/ai-features/index.ts b/workspaces/mi/mi-core/src/rpc-types/ai-features/index.ts index 16f6c49568d..4aaa9d9a92e 100644 --- a/workspaces/mi/mi-core/src/rpc-types/ai-features/index.ts +++ b/workspaces/mi/mi-core/src/rpc-types/ai-features/index.ts @@ -65,6 +65,12 @@ export interface MIAIPanelAPI { // ================================== hasAnthropicApiKey: () => Promise + // ================================== + // Tavily API Key (Bedrock-only web search/fetch BYOK) + // ================================== + getTavilyApiKey: () => Promise + setTavilyApiKey: (request: { apiKey: string }) => 
Promise<{ success: boolean; error?: string }> + // ================================== // MI Copilot Login Status // ================================== @@ -108,6 +114,8 @@ export { abortCodeGeneration, codeGenerationEvent, hasAnthropicApiKey, + getTavilyApiKey, + setTavilyApiKey, isMiCopilotLoggedIn, fetchUsage, generateUnitTest, diff --git a/workspaces/mi/mi-core/src/rpc-types/ai-features/rpc-type.ts b/workspaces/mi/mi-core/src/rpc-types/ai-features/rpc-type.ts index a647553007a..9951382e40e 100644 --- a/workspaces/mi/mi-core/src/rpc-types/ai-features/rpc-type.ts +++ b/workspaces/mi/mi-core/src/rpc-types/ai-features/rpc-type.ts @@ -37,6 +37,12 @@ export const generateCode: RequestType = { method: `${_prefix}/abortCodeGeneration` }; export const hasAnthropicApiKey: RequestType = { method: `${_prefix}/hasAnthropicApiKey` }; export const isMiCopilotLoggedIn: RequestType = { method: `${_prefix}/isMiCopilotLoggedIn` }; + +// Bedrock-only Tavily key management for web search/fetch tools. +// `getTavilyApiKey` returns the configured key (or undefined). `setTavilyApiKey` +// stores or clears it (pass an empty string to clear). Both reject for non-Bedrock auth. +export const getTavilyApiKey: RequestType = { method: `${_prefix}/getTavilyApiKey` }; +export const setTavilyApiKey: RequestType<{ apiKey: string }, { success: boolean; error?: string }> = { method: `${_prefix}/setTavilyApiKey` }; export const fetchUsage: RequestType = ({ onClick, isLoading, disabled isLoading={isLoading} >
- + Map
diff --git a/workspaces/mi/mi-diagram/src/components/Form/AIAutoFillBox/AIAutoFillBox.tsx b/workspaces/mi/mi-diagram/src/components/Form/AIAutoFillBox/AIAutoFillBox.tsx index 2daab7406c6..5d7266812f9 100644 --- a/workspaces/mi/mi-diagram/src/components/Form/AIAutoFillBox/AIAutoFillBox.tsx +++ b/workspaces/mi/mi-diagram/src/components/Form/AIAutoFillBox/AIAutoFillBox.tsx @@ -16,7 +16,7 @@ * under the License. */ import React from "react"; -import { Button, Codicon } from "@wso2/ui-toolkit"; +import { Button, Codicon, Icon } from "@wso2/ui-toolkit"; import EditableDiv from "../EditableDiv/EditableDiv"; import { ThemeColors } from "@wso2/ui-toolkit"; import { VSCodeColors } from "../../../resources/constants"; @@ -139,8 +139,8 @@ const AIAutoFillBox: React.FC = ({ appearance="secondary" tooltip="Auto Fill" onClick={handleGenerateAi}> - diff --git a/workspaces/mi/mi-diagram/src/components/Form/FormExpressionField/index.tsx b/workspaces/mi/mi-diagram/src/components/Form/FormExpressionField/index.tsx index 30002c82ad4..bccdf3a2035 100644 --- a/workspaces/mi/mi-diagram/src/components/Form/FormExpressionField/index.tsx +++ b/workspaces/mi/mi-diagram/src/components/Form/FormExpressionField/index.tsx @@ -29,6 +29,7 @@ import { ErrorBanner, FormExpressionEditor, FormExpressionEditorRef, + Icon, RequiredFormInput, TextField, Typography, @@ -569,8 +570,8 @@ export const FormExpressionField = (params: FormExpressionFieldProps) => { backgroundColor: isAIFill ? 
Colors.PRIMARY : "transparent", }} > - + + diff --git a/workspaces/mi/mi-extension/resources/icons/light-ai-chat.svg b/workspaces/mi/mi-extension/resources/icons/light-ai-chat.svg new file mode 100644 index 00000000000..667691bd9c4 --- /dev/null +++ b/workspaces/mi/mi-extension/resources/icons/light-ai-chat.svg @@ -0,0 +1,4 @@ + + + diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/data-mapper/system.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/data-mapper/system.ts index e6c016da307..fd2091a9c06 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/data-mapper/system.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/data-mapper/system.ts @@ -16,8 +16,20 @@ * under the License. */ +import { DATA_MAPPER_REFERENCE_SECTIONS } from '../../context/data_mapper_reference'; + /** - * Enhanced system prompt for data mapper sub-agent with dm-utils awareness + * System prompt for data mapper sub-agent. + * + * The dmUtils API surface, TypeScript rules, dynamic-array (TS2556) handling, + * and array patterns live in the shared deep-context reference at + * `../../context/data_mapper_reference.ts`. That same reference is also + * exposed to the main agent via load_context_reference("data-mapper-reference"), + * so the two call sites can't drift. + * + * What stays here: sub-agent-specific generation rules (respect existing + * mappings, include all output fields, output-format constraints, example + * output) and the assistant framing. */ export const DATA_MAPPER_SYSTEM_TEMPLATE = ` You are a specialized data mapping assistant for WSO2 Micro Integrator running inside the VS Code IDE. Your task is to generate TypeScript mapping functions that transform data between input and output schemas. 
@@ -30,11 +42,7 @@ You will receive a TypeScript file with: - \`OutputRoot\` interface defining the output schema - A \`mapFunction\` to complete: \`function mapFunction(input: InputRoot): OutputRoot\` -**Critical TypeScript Rules:** -- Use explicit return statements in arrow functions: \`map(item => { return {...}; })\` NOT \`map(item => ({...}))\` -- Enclose field names with spaces/special characters in quotes -- Preserve exact field names from schemas -- The file already imports dmUtils as: \`import * as dmUtils from "./dm-utils";\` +${DATA_MAPPER_REFERENCE_SECTIONS.typescript_rules} ### 2. Respect Pre-existing Mappings - **Never overwrite existing mappings** - even if they seem incorrect @@ -57,64 +65,17 @@ You will receive a TypeScript file with: - Transform data structures as needed (arrays to objects, merging fields, etc.) - Handle arrays of objects vs single objects appropriately -### 5. Available Utility Functions (dmUtils) - -You have access to the \`dmUtils\` module with these helper functions. 
**Use these instead of raw JavaScript operators when appropriate:** - -**Arithmetic Operations:** -- \`dmUtils.sum(num1, ...nums)\` - Sum multiple numbers - Example: \`dmUtils.sum(item.price, item.tax, item.shipping)\` -- \`dmUtils.average(num1, ...nums)\` - Calculate average - Example: \`dmUtils.average(...input.scores)\` -- \`dmUtils.max(num1, ...nums)\` - Find maximum value -- \`dmUtils.min(num1, ...nums)\` - Find minimum value -- \`dmUtils.ceiling(num)\` - Round up to nearest integer -- \`dmUtils.floor(num)\` - Round down to nearest integer -- \`dmUtils.round(num)\` - Round to nearest integer - -**Type Conversions:** -- \`dmUtils.toNumber(str)\` - Convert string to number - Example: \`dmUtils.toNumber(input.quantity)\` -- \`dmUtils.toBoolean(str)\` - Convert string to boolean ("true" → true) -- \`dmUtils.numberToString(num)\` - Convert number to string -- \`dmUtils.booleanToString(bool)\` - Convert boolean to string - -**String Operations:** -- \`dmUtils.concat(str1, ...strs)\` - Concatenate multiple strings - Example: \`dmUtils.concat(input.firstName, " ", input.lastName)\` -- \`dmUtils.split(str, separator)\` - Split string into array - Example: \`dmUtils.split(input.fullName, " ")\` -- \`dmUtils.toUppercase(str)\` - Convert to uppercase -- \`dmUtils.toLowercase(str)\` - Convert to lowercase -- \`dmUtils.stringLength(str)\` - Get string length -- \`dmUtils.startsWith(str, prefix)\` - Check if string starts with prefix -- \`dmUtils.endsWith(str, suffix)\` - Check if string ends with suffix -- \`dmUtils.substring(str, start, end)\` - Extract substring -- \`dmUtils.trim(str)\` - Remove leading/trailing whitespace -- \`dmUtils.replaceFirst(str, target, replacement)\` - Replace first occurrence -- \`dmUtils.match(str, regex)\` - Test if string matches regex pattern - -**When to Use dmUtils:** -- Concatenating strings: Use \`dmUtils.concat()\` instead of \`+\` operator -- Calculating totals/averages: Use \`dmUtils.sum()\` or \`dmUtils.average()\` -- Type 
conversions: Always use dmUtils conversion functions -- String transformations: Use dmUtils string functions -- **Goal:** Prefer dmUtils for clarity and consistency +### 5. dmUtils, Dynamic Arrays, and TypeScript Pitfalls + +${DATA_MAPPER_REFERENCE_SECTIONS.dmutils_functions} + +${DATA_MAPPER_REFERENCE_SECTIONS.dynamic_arrays} + +${DATA_MAPPER_REFERENCE_SECTIONS.when_to_use_dmutils} ### 6. Array Handling -- When input has array but output expects single object, select appropriate item: - \`input.items[0]\` (first element) or \`input.items.find(...))\` (conditional) -- When output expects array, use \`map()\` with explicit returns -- Example: -\`\`\`typescript -items: input.orders.map(order => { - return { - id: order.orderId, - total: dmUtils.sum(order.subtotal, order.tax), - itemCount: order.items.length - }; -}) -\`\`\` + +${DATA_MAPPER_REFERENCE_SECTIONS.array_handling} ### 7. Output Format Return **only** the complete mapFunction. Do NOT include: @@ -130,7 +91,10 @@ export function mapFunction(input: InputRoot): OutputRoot { orderId: input.id, customerName: dmUtils.concat(input.customer.firstName, " ", input.customer.lastName), email: dmUtils.toLowercase(input.customer.email), - totalAmount: dmUtils.sum(input.subtotal, input.tax, input.shipping), + // Fixed set of fields → dmUtils.sum is correct + subtotal: dmUtils.sum(input.itemsTotal, input.tax, input.shipping), + // Dynamic array aggregation → reduce, NOT dmUtils.sum(...arr) + lineItemsTotal: input.lineItems.reduce((acc, item) => acc + item.lineTotal, 0), itemCount: input.lineItems.length, items: input.lineItems.map(item => { return { @@ -150,6 +114,7 @@ export function mapFunction(input: InputRoot): OutputRoot { ## Key Reminders - Use explicit returns in arrow functions: \`map(x => { return {...}; })\` - Leverage dmUtils for all transformations (string concat, arithmetic, type conversion) +- **Never spread a dynamic array into \`dmUtils.sum/average/max/min\`** — it fails TS2556. 
Use \`array.reduce(...)\` for array aggregations. - Include all output fields (use defaults for unmappable fields) - Preserve existing mappings (never overwrite) - Follow TypeScript best practices diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/agent.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/agent.ts index f54683c8f94..1d9c3354265 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/agent.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/agent.ts @@ -30,9 +30,18 @@ const NATIVE_COMPACTION_TRIGGER_TOKENS = 200000; import { ModelMessage, streamText, stepCountIs, UserModelMessage, SystemModelMessage, wrapLanguageModel } from 'ai'; import { AnthropicProviderOptions } from '@ai-sdk/anthropic'; -import { getAnthropicClient, getAnthropicClientForCustomModel, AnthropicModel, resolveMainModelId } from '../../../connection'; +import { getAnthropicClient, getAnthropicClientForCustomModel, getAnthropicProvider, AnthropicModel, resolveMainModelId } from '../../../connection'; +import { getLoginMethod, getTavilyApiKey } from '../../../auth'; import { getSystemPrompt } from '../main/system'; -import { getUserPrompt, UserPromptParams, UserPromptContentBlock } from './prompt'; +import { + BlockInjectionStatus, + BlockInjectionStatuses, + computeSessionContextBlockHashes, + getUserPrompt, + SessionContextBlockHashes, + UserPromptContentBlock, + UserPromptParams, +} from './prompt'; import { addCacheControlToMessages } from '../../../cache-utils'; import { buildMessageContent } from '../../attachment-utils'; import { COMPACT_SYSTEM_REMINDER_AUTO_TRIGGERED } from '../compact/prompt'; @@ -63,11 +72,12 @@ import { DEEPWIKI_ASK_QUESTION_TOOL_NAME, } from './tools'; import { logInfo, logError, logDebug } from '../../../copilot/logger'; -import { ChatHistoryManager, TOOL_USE_INTERRUPTION_CONTEXT } from '../../chat-history-manager'; +import { ChatHistoryManager, 
SessionContextBlocksState, TOOL_USE_INTERRUPTION_CONTEXT } from '../../chat-history-manager'; import { getToolAction } from '../../tool-action-mapper'; import { AgentUndoCheckpointManager } from '../../undo/checkpoint-manager'; import { getCopilotSessionDir } from '../../storage-paths'; import { ShellApprovalRuleStore } from '../../tools/types'; +import { WebToolsProvider } from '../../tools/web_tools'; import { awaitWithTimeout, createProxyTerminatedError, @@ -83,7 +93,7 @@ import { } from '../../stream_guard'; // Import types from mi-core (shared with visualizer) -import { AgentEvent, AgentEventType, FileObject, ImageObject, AgentMode, ModelSettings } from '@wso2/mi-core'; +import { AgentEvent, AgentEventType, FileObject, ImageObject, AgentMode, LoginMethod, ModelSettings } from '@wso2/mi-core'; // Re-export types for other modules that import from agent.ts export type { AgentEvent, AgentEventType }; @@ -120,8 +130,6 @@ export interface AgentRequest { images?: ImageObject[]; /** Enable Claude thinking mode (reasoning blocks) */ thinking?: boolean; - /** Skip per-call web approval prompts when true */ - webAccessPreapproved?: boolean; /** Path to the MI project */ projectPath: string; /** Map of file path to content for relevant existing code (optional, for future use) */ @@ -188,6 +196,100 @@ interface NormalizedToolResultForUi { [key: string]: unknown; } +/** + * Compare a per-block tracking value against its persisted predecessor and + * decide what to render. `omit`: same as last turn — skip rendering. + * `first-injection`: never injected before (or full re-prime needed) — render + * without notice. `re-injection`: value drifted — render with a + * "[context updated]" notice. `cleared`: was injected before but is now + * absent (e.g. payloads removed by the user) — render an explicit removal + * notice and clear the persisted hash so future injections start fresh. 
+ */ +function decideBlockStatus( + current: string | undefined, + previous: string | undefined, + forceFirstInjection: boolean, +): BlockInjectionStatus { + if (current === undefined) { + // Was injected on a prior turn but absent now — emit a removal notice + // so the model doesn't keep referencing the stale prior-turn block. + // First-message / post-compaction wipes prior context, so 'omit' there. + if (previous !== undefined && !forceFirstInjection) { + return 'cleared'; + } + return 'omit'; + } + if (forceFirstInjection || previous === undefined) { + return 'first-injection'; + } + return previous === current ? 'omit' : 're-injection'; +} + +/** + * Merge the current per-block hashes into the persisted state, but only for + * blocks we're about to inject this turn. Returns `undefined` when no block + * needs persisting (avoids a no-op metadata write). + */ +function buildUpdatedBlocksState( + previous: SessionContextBlocksState, + current: SessionContextBlockHashes, + statuses: BlockInjectionStatuses, +): SessionContextBlocksState | undefined { + const updated: SessionContextBlocksState = { ...previous }; + let touched = false; + // 'cleared' wipes the persisted hash so the next non-empty injection + // counts as 'first-injection' rather than 're-injection'. In practice + // only payloads can be cleared (other blocks always have a current hash), + // but applying uniformly keeps the semantics consistent. 
+ const apply = ( + key: K, + status: BlockInjectionStatus, + nextHash: SessionContextBlocksState[K] | undefined, + ): void => { + if (status === 'cleared') { + updated[key] = undefined as SessionContextBlocksState[K]; + touched = true; + } else if (status !== 'omit') { + updated[key] = nextHash as SessionContextBlocksState[K]; + touched = true; + } + }; + apply('env', statuses.env, current.env); + apply('connectors', statuses.connectors, current.connectors); + apply('webAvailability', statuses.webAvailability, current.webAvailability); + apply('modePolicy', statuses.modePolicy, current.modePolicy); + apply('payloads', statuses.payloads, current.payloads); + return touched ? updated : undefined; +} + +function logBlockInjectionDrift( + statuses: BlockInjectionStatuses, + previous: SessionContextBlocksState, + current: SessionContextBlockHashes, +): void { + const driftedBlocks: string[] = []; + const note = ( + name: string, + status: BlockInjectionStatus, + prev: string | undefined, + next: string | undefined, + ): void => { + if (status === 're-injection') { + driftedBlocks.push(`${name}(${prev}→${next})`); + } else if (status === 'cleared') { + driftedBlocks.push(`${name}(${prev}→cleared)`); + } + }; + note('env', statuses.env, previous.env, current.env); + note('connectors', statuses.connectors, previous.connectors, current.connectors); + note('webAvailability', statuses.webAvailability, previous.webAvailability, current.webAvailability); + note('mode', statuses.modePolicy, previous.modePolicy, current.modePolicy); + note('payloads', statuses.payloads, previous.payloads, current.payloads); + if (driftedBlocks.length > 0) { + logInfo(`[Agent] Session-context drift — re-injecting: ${driftedBlocks.join(', ')}`); + } +} + const TOOL_INTERRUPTION_ERROR_CODE = 'AGENT_TOOL_INTERRUPTION'; const MODEL_ERROR_PATTERN = /model.*not found|invalid.*model|unknown model|could not resolve model|model.*deprecated|model.*not available|model.*does not 
exist|model.*decommissioned/i; @@ -426,6 +528,20 @@ export async function executeAgent( // Session directory for output files (build.txt, run.txt) const sessionDir = getCopilotSessionDir(request.projectPath, sessionId); + // Declared outside the try so the catch path can also flush open thinking + // blocks (errors / aborts mid-stream would otherwise leave the UI's + // spinner stuck forever). + const reasoningById = new Map(); + const flushOpenThinkingBlocks = (): void => { + if (reasoningById.size === 0) { + return; + } + for (const id of reasoningById.keys()) { + emitEvent({ type: 'thinking_end', thinkingId: id }); + } + reasoningById.clear(); + }; + try { logInfo(`[Agent] Starting agent execution for project: ${request.projectPath}`); @@ -454,13 +570,71 @@ export async function executeAgent( } } as SystemModelMessage; - // Include env + connector context only on first message or after compaction + // Resolve the web-tool provider once for this turn. + // - Anthropic/Proxy paths get Anthropic's first-party server tools registered + // directly on the main streamText call (no wrapper, no extra LLM round-trip). + // - Bedrock + Tavily key gets the Tavily-backed local tool. + // - Bedrock + no key omits the tools and relies on the `web_search_unavailable` + // system reminder to steer the model away. + const loginMethod = await getLoginMethod(); + const isBedrock = loginMethod === LoginMethod.AWS_BEDROCK; + const tavilyKey = isBedrock ? (await getTavilyApiKey()) : null; + const webSearchUnavailable = isBedrock && !tavilyKey; + const webToolsProvider: WebToolsProvider = + isBedrock ? (tavilyKey ? 'tavily-local' : 'none') : 'anthropic-server'; + const anthropicProviderForWebTools = + webToolsProvider === 'anthropic-server' ? await getAnthropicProvider() : undefined; + + // Per-block re-injection decision. 
For each tracked block (env, connectors, + // web availability, mode policy, payloads) compute a current hash, compare + // to the value persisted on session metadata, and decide: + // - 'omit' : block is unchanged, skip rendering it + // - 'first-injection' : never injected before (or full re-prime needed), + // render without a "[context updated]" notice + // - 're-injection' : value drifted, render with a notice so the model + // knows something changed + // First message and post-compaction force first-injection on every block — + // model has lost prior context so we re-prime everything without notices. const isFirstMessage = chatHistory.length === 0; const isPostCompaction = chatHistory.length > 0 && (chatHistory[0] as any)?._compactSynthetic === true; - const includeSessionContext = isFirstMessage || isPostCompaction; + const forceFirstInjection = isFirstMessage || isPostCompaction; + + const sessionContextResult = await computeSessionContextBlockHashes({ + projectPath: request.projectPath, + runtimeVersion, + webSearchUnavailable, + loginMethod, + mode: request.mode || 'edit', + }); + const currentBlockHashes = sessionContextResult.hashes; + const sessionMetadata = request.chatHistoryManager + ? await request.chatHistoryManager.loadMetadata() + : null; + const previousBlocks = sessionMetadata?.sessionContextBlocks ?? 
{}; + + const blockStatuses: BlockInjectionStatuses = { + env: decideBlockStatus(currentBlockHashes.env, previousBlocks.env, forceFirstInjection), + connectors: decideBlockStatus(currentBlockHashes.connectors, previousBlocks.connectors, forceFirstInjection), + webAvailability: decideBlockStatus(currentBlockHashes.webAvailability, previousBlocks.webAvailability, forceFirstInjection), + modePolicy: decideBlockStatus(currentBlockHashes.modePolicy, previousBlocks.modePolicy, forceFirstInjection), + payloads: decideBlockStatus(currentBlockHashes.payloads, previousBlocks.payloads, forceFirstInjection), + }; + const previousMode = previousBlocks.modePolicy as AgentMode | undefined; + + // Persist the new hashes for any block we're about to inject. Eagerly: + // a crash between persist and the request reaching the model means the + // next turn skips injection — same failure mode as the original + // first-message-only behavior, so no regression vs prior behavior. + const updatedBlocks = buildUpdatedBlocksState(previousBlocks, currentBlockHashes, blockStatuses); + if (updatedBlocks && request.chatHistoryManager) { + await request.chatHistoryManager.updateMetadata({ sessionContextBlocks: updatedBlocks }); + logBlockInjectionDrift(blockStatuses, previousBlocks, currentBlockHashes); + } - // Build user prompt + // Build user prompt — pass the pre-built sessionContextResult so + // getUserPrompt skips the second pass of pom.xml read, .git/HEAD read, + // and connector-store catalog lookup. 
const userPromptParams: UserPromptParams = { query: request.query, mode: request.mode || 'edit', @@ -468,7 +642,11 @@ export async function executeAgent( sessionId, runtimeVersion, runtimeVersionDetected: systemPromptSelection.runtimeVersionDetected, - includeSessionContext, + webSearchUnavailable, + loginMethod, + blockStatuses, + previousMode, + precomputedContext: sessionContextResult, }; const userPromptBlocks = await getUserPrompt(userPromptParams); @@ -551,7 +729,9 @@ export async function executeAgent( pendingQuestions, pendingApprovals, getAnthropicClient, - webAccessPreapproved: request.webAccessPreapproved === true, + webToolsProvider, + anthropicProvider: anthropicProviderForWebTools, + tavilyApiKey: tavilyKey || undefined, shellApprovalRuleStore: request.shellApprovalRuleStore, undoCheckpointManager: request.undoCheckpointManager, abortSignal: streamWatchdog.abortSignal, @@ -627,12 +807,14 @@ export async function executeAgent( }; // Configure Anthropic provider options. - // When thinking is enabled, keep reasoning in model messages for JSONL replay. + // - `adaptive` lets the model decide whether to think per step. + // - `effort: 'low'` biases adaptive toward skipping for simple steps. + // - `display: 'summarized'` is required on Opus 4.7 (default changed to + // 'omitted' there) to actually surface reasoning text to the UI; + // harmless on Sonnet which already defaults to summarized. const anthropicOptions: AnthropicProviderOptions = request.thinking - // NOTE: Current pinned @ai-sdk/anthropic types support enabled/disabled thinking. - // Adaptive thinking can be enabled once the SDK is upgraded in this repo. - ? { thinking: { type: 'adaptive' }, effort: 'low' } - : {}; + ? { thinking: { type: 'adaptive', display: 'summarized' } as any, effort: 'low' } + : {}; // Native server-side compaction: Anthropic auto-summarizes the conversation // when input tokens exceed the trigger threshold. 
The compaction block is @@ -655,10 +837,14 @@ export async function executeAgent( }; } - // Build beta headers: include compaction beta when native compaction is enabled. + // Bedrock InvokeModel rejects `defer_loading` on `type: "custom"` tools unless the + // tool-search beta is set (the SDK auto-adds it only when a server-side tool_search + // tool is in the tools array, which we don't use). On direct Anthropic the header is + // a no-op for custom-tool defer_loading, so we add it unconditionally on Bedrock. const betaHeaders = [ ...(request.thinking ? ['interleaved-thinking-2025-05-14'] : []), ...(ENABLE_NATIVE_COMPACTION ? ['compact-2026-01-12'] : []), + ...(isBedrock ? ['tool-search-tool-2025-10-19'] : []), ]; const requestHeaders = betaHeaders.length > 0 ? { 'anthropic-beta': betaHeaders.join(',') } @@ -780,8 +966,8 @@ export async function executeAgent( const toolInputMap = new Map(); // Track tool calls that already emitted a pre-input loading state. const preloadedToolCallIds = new Set(); - // Track reasoning text by block ID and emit complete thinking blocks on end. - const reasoningById = new Map(); + // (reasoningById + flushOpenThinkingBlocks declared above the try so + // catch / unexpected-end paths can flush too.) // Track whether the current text block is a native compaction summary. let isCompactionBlock = false; let compactionContent = ''; @@ -1121,6 +1307,10 @@ export async function executeAgent( cleanupStreamLifecycle?.(); const errorMsg = getErrorMessage(part.error); logError(`[Agent] Stream error: ${errorMsg}`); + // Structured diagnostics only — getErrorDiagnostics whitelists/truncates + // the safe provider fields. A raw JSON dump would re-introduce the leak + // surface (requestBodyValues, unsanitized headers, etc.) at any log level. 
+ logError(`[Agent] Stream error diagnostics: ${getErrorDiagnostics(part.error)}`); emitEvent({ type: 'error', error: errorMsg, @@ -1172,6 +1362,8 @@ export async function executeAgent( await flushNativeCompaction(); } + // Close any reasoning blocks Anthropic didn't end explicitly. + flushOpenThinkingBlocks(); cleanupStreamLifecycle?.(); logInfo(`[Agent] Execution finished. Modified files: ${modifiedFiles.length}`); const finishReason = normalizeFinishReason(part); @@ -1204,6 +1396,7 @@ export async function executeAgent( } // Stream completed without finish event (shouldn't happen normally) + flushOpenThinkingBlocks(); cleanupStreamLifecycle?.(); // Capture partial messages if available, but do not block forever waiting for response. try { @@ -1227,6 +1420,7 @@ export async function executeAgent( }; } catch (error: any) { + flushOpenThinkingBlocks(); cleanupStreamLifecycle?.(); const abortReason = streamWatchdog?.getAbortReason(); const classifiedError = classifyAgentExecutionError({ diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/mode.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/mode.ts index 9e1bf36ac42..70ee330790c 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/mode.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/mode.ts @@ -30,6 +30,14 @@ const ASK_MODE_POLICY = ` - Provide fully updated code in code blocks (not just edits). The system provides "Add to project" which replaces entire files. - For complex changes, suggest the user switch to EDIT mode.`; +/** + * Short summary of Plan-mode's highest-stakes rules — kept always-rendered + * even when the full policy is gated behind change detection. Mirrors the + * "Critical Rules" from `PLAN_MODE_SHARED_GUIDELINES` so the model can never + * lose sight of the turn-ending rule and the read-only constraint. 
+ */ +const PLAN_MODE_BRIEF_NOTE = `Plan-mode rules (must hold every turn): read-only — mutation tools are blocked except for writing/editing the assigned plan file; every turn MUST end with ${ASK_USER_TOOL_NAME} or ${EXIT_PLAN_MODE_TOOL_NAME}.`; + const EDIT_MODE_POLICY = ` - Use ${TODO_WRITE_TOOL_NAME} to track progress when you have multiple sub-tasks. - For complex tasks, enter PLAN mode with ${ENTER_PLAN_MODE_TOOL_NAME} to plan before implementing. @@ -85,3 +93,16 @@ export async function getModeReminder(params: ModeReminderParams): Promise -# Environment -Working directory: {{env_working_directory}} -Is directory a git repo: {{env_is_git_repo}} -{{#if env_git_branch}}Current git branch: {{env_git_branch}}{{/if}} -Platform: {{env_platform}} -OS Version: {{env_os_version}} -Today's date: {{env_today}} -MI Runtime version: {{env_mi_runtime_version}} -MI Runtime home path: {{env_mi_runtime_home_path}} -MI Runtime log directory: {{env_mi_log_dir_path}} -MI Runtime logs: - - wso2carbon.log (main): {{env_mi_runtime_carbon_log_path}} - - wso2error.log (errors + stack traces): {{env_mi_error_log_path}} - - http_access.log (HTTP requests): {{env_mi_http_access_log_path}} - - wso2-mi-service.log (service lifecycle): {{env_mi_service_log_path}} - - correlation.log (request tracing): {{env_mi_correlation_log_path}} +{{{env_block}}} +{{/if}} +{{#if connectors_block}} -Available WSO2 connector artifact ids for this version of the MI runtime (from the connector store — pass these to get_connector_info / add_or_remove_connector): -{{available_connector_artifact_ids}} - -Available downloadable inbound artifact ids for this version of the MI runtime (from the connector store — add via add_or_remove_connector): -{{available_inbound_artifact_ids}} +{{{connectors_block}}} + +{{/if}} -Available bundled inbound ids (shipped with this version of the MI runtime — use the id directly with get_connector_info, do NOT add to pom.xml): -{{available_bundled_inbound_ids}} +{{#if 
web_availability_block}} + +{{{web_availability_block}}} {{/if}} @@ -116,11 +103,9 @@ The user has opened the file {{currentlyOpenedFile}} in the IDE. This may or may {{/if}} -{{#if userPreconfigured}} +{{#if payloads_block}} -# Preconfigured Values -{{payloads}} -These are preconfigured values in the Low-Code IDE that should be accessed using Synapse expressions in the integration flow. Always use Synapse expressions when referring to these values. +{{{payloads_block}}} {{/if}} @@ -132,9 +117,16 @@ These are preconfigured values in the Low-Code IDE that should be accessed using {{/if}} -You are in {{mode_upper}} mode. -{{mode_policy}} +You are in {{mode_upper}} mode.{{#if mode_changed_from}} [mode changed from {{mode_changed_from}}]{{/if}}{{#if mode_brief_note}} + +{{mode_brief_note}}{{/if}} + + +{{#if full_mode_policy_block}} + +{{{full_mode_policy_block}}} +{{/if}} {{#if plan_file_reminder}} @@ -162,6 +154,33 @@ You are in {{mode_upper}} mode. // Types // ============================================================================ +/** + * Per-block injection status. Decided in agent.ts based on first-message / + * post-compaction triggers and per-block hash drift, then passed to + * `getUserPrompt` so each block renders the correct content (with or without + * a "[context updated]" notice, or omitted entirely). + * + * `cleared`: the block was injected on a prior turn but is absent now (e.g. + * payloads removed by the user). Render an explicit removal notice so the + * model doesn't keep referencing the stale prior-turn reminder, and clear + * the persisted hash so the next non-empty injection starts fresh. + */ +export type BlockInjectionStatus = 'omit' | 'first-injection' | 're-injection' | 'cleared'; + +/** + * Per-block injection statuses for the five tracked session-context blocks. + * Default (when omitted from `UserPromptParams`) is 'first-injection' for all + * blocks — matches the legacy "always inject session context" behavior. 
+ */ +export interface BlockInjectionStatuses { + env: BlockInjectionStatus; + connectors: BlockInjectionStatus; + webAvailability: BlockInjectionStatus; + /** Plan-only. For Ask/Edit, the full policy is always rendered regardless of this status. */ + modePolicy: BlockInjectionStatus; + payloads: BlockInjectionStatus; +} + /** * Parameters for rendering the user prompt */ @@ -174,14 +193,58 @@ export interface UserPromptParams { projectPath: string; /** Session ID for plan file path generation */ sessionId?: string; - /** Pre-configured payloads, query params, or path params (optional) */ - payloads?: string; /** MI runtime version from pom.xml (optional; avoids re-reading pom when already known) */ runtimeVersion?: string | null; /** True when runtime version was detected from project metadata */ runtimeVersionDetected?: boolean; - /** Include session context (env, connectors) — true for first message or after compaction, false otherwise */ - includeSessionContext?: boolean; + /** True when the active provider can't run web_search/web_fetch (e.g. Bedrock without a Tavily key). */ + webSearchUnavailable?: boolean; + /** + * Active Copilot backend for this session. Surfaced to the model in the + * `` block so it can reason about backend-specific behaviour (notably the + * web tools — Anthropic server-side on Proxy/BYOK, Tavily-local on Bedrock). + */ + loginMethod?: LoginMethod; + /** + * Per-block injection statuses computed by agent.ts from the previous + * `SessionMetadata.sessionContextBlocks` and the current snapshot. When + * omitted, all blocks default to 'first-injection' (full content, no notice). + */ + blockStatuses?: BlockInjectionStatuses; + /** + * Previous mode name used for the "[mode changed from EDIT]" notice when + * `blockStatuses.modePolicy === 're-injection'` and the agent is in Plan + * mode. Ignored otherwise. + */ + previousMode?: AgentMode; + /** + * Pre-built session context from `computeSessionContextBlockHashes`. 
When + * provided, `getUserPrompt` reuses the same snapshot instead of rebuilding + * it (avoids the second pass of pom.xml read, .git/HEAD read, and + * connector-store catalog lookup). + */ + precomputedContext?: SessionContextBuildResult; +} + +// ============================================================================ +// Backend label mapping +// ============================================================================ + +/** + * One-line summary of each Copilot backend, surfaced in the `` block. + * Keep in sync with the "Copilot backends" section of system.ts. + */ +function describeBackend(loginMethod?: LoginMethod): string { + switch (loginMethod) { + case LoginMethod.MI_INTEL: + return 'WSO2 Integrator Copilot Proxy (SSO via WSO2 Devant) — quota-limited; Anthropic server-side web_search / web_fetch'; + case LoginMethod.ANTHROPIC_KEY: + return 'Anthropic Direct (BYOK) — user-paid; Anthropic server-side web_search / web_fetch'; + case LoginMethod.AWS_BEDROCK: + return 'AWS Bedrock — user-paid; web tools only available when a Tavily API key is configured (Tavily-backed wrapper)'; + default: + return 'unknown'; + } } // ============================================================================ @@ -335,6 +398,412 @@ function getRuntimePaths(projectPath: string): { }; } +// ============================================================================ +// Session Context Snapshot +// ============================================================================ + +/** + * Subset of `UserPromptParams` needed to compute the session-context snapshot + * (env + connectors + web availability + mode + tryout payloads). Used by both + * `getUserPrompt` and `computeSessionContextBlockHashes` so the two stay in + * lockstep. + */ +export interface SessionContextParams { + projectPath: string; + runtimeVersion?: string | null; + webSearchUnavailable?: boolean; + loginMethod?: LoginMethod; + mode?: AgentMode; +} + +/** + * Per-file fingerprint for `.tryout/*.json`. 
mtimeMs+size is good enough + * for change detection here — the IDE always writes through normal + * `fs.writeFile`, so an unchanged-content file keeps its mtime, and any + * real edit bumps it. Avoids reading every payload's bytes per turn + * (matters when users stash large saved requests). + */ +interface TryoutPayloadEntry { + name: string; + mtimeMs: number; + size: number; +} + +/** + * Captures every value rendered inside any tracked session-context block of + * the user-prompt template. The block-hashing logic in + * `computeSessionContextBlockHashes` derives a per-block hash from a stable + * subset of these fields. + */ +export interface SessionContextSnapshot { + // env block + workingDirectory: string; + isGitRepo: boolean; + gitBranch: string | null; + platform: string; + osVersion: string; + today: string; + backend: string; + miRuntimeVersion: string; + miRuntimeHomePath: string; + miLogDirPath: string; + miCarbonLogPath: string; + miErrorLogPath: string; + miHttpAccessLogPath: string; + miServiceLogPath: string; + miCorrelationLogPath: string; + // connectors block + connectorArtifactIds: string; + inboundArtifactIds: string; + bundledInboundIds: string; + // web availability block + webSearchUnavailable: boolean; + // mode policy block (stored as the verbatim mode name) + mode: AgentMode; + /** + * Listing of `.tryout/*.json` files (sorted by name). The model reads them + * on demand via `file_read` — we surface only the listing in the user + * prompt to avoid dumping (potentially large) saved request bodies on + * every turn. Empty array = no `.tryout/` folder or no payload files. + */ + tryoutPayloads: TryoutPayloadEntry[]; +} + +interface SessionContextWithCatalog { + snapshot: SessionContextSnapshot; + catalogWarnings: string[]; + catalogStoreStatus: string; + runtimeVersionResolved: string | null; +} + +/** + * Per-block hashes / scalars used by agent.ts for change detection. 
+ * `modePolicy` stores the verbatim mode name, not a hash, so the change + * notice can say `[mode changed from EDIT]`. `payloads` is `undefined` when + * no payloads are provided this turn (block is omitted entirely). + */ +export interface SessionContextBlockHashes { + env: string; + connectors: string; + webAvailability: string; + modePolicy: AgentMode; + payloads: string | undefined; +} + +/** + * Recursive stable JSON stringifier — sorts object keys deterministically so + * semantically-equal objects produce identical strings regardless of insertion + * order. Used by `hashJson` so block hashes are reproducible across processes. + */ +function stableStringify(value: unknown): string { + if (value === null || typeof value !== 'object') { + return JSON.stringify(value); + } + if (Array.isArray(value)) { + return '[' + value.map(stableStringify).join(',') + ']'; + } + const obj = value as Record; + const keys = Object.keys(obj).sort(); + return '{' + keys.map(k => JSON.stringify(k) + ':' + stableStringify(obj[k])).join(',') + '}'; +} + +function hashJson(value: unknown): string { + return crypto.createHash('sha256').update(stableStringify(value)).digest('hex').slice(0, 16); +} + +/** + * Scan `.tryout/*.json` and return a sorted listing for change detection. + * mtimeMs+size is enough — IDE writes always bump mtime, and the cost of a + * false-positive re-injection is one extra reminder (negligible) while the + * cost of reading every payload's bytes per turn would be real for users + * who save large request bodies. 
+ */ +function scanTryoutPayloads(projectPath: string): TryoutPayloadEntry[] { + const tryoutDir = path.join(projectPath, '.tryout'); + if (!fs.existsSync(tryoutDir)) { + return []; + } + try { + const files = fs.readdirSync(tryoutDir).filter(f => f.endsWith('.json')); + const result: TryoutPayloadEntry[] = []; + for (const file of files) { + const filePath = path.join(tryoutDir, file); + try { + const stat = fs.statSync(filePath); + if (!stat.isFile()) { + continue; + } + result.push({ name: file, mtimeMs: stat.mtimeMs, size: stat.size }); + } catch { + // Skip unreadable entries silently — model can still file_read by name. + } + } + return result.sort((a, b) => a.name.localeCompare(b.name)); + } catch (error) { + logDebug( + `[Prompt] Failed to scan .tryout/ for project ${projectPath}: ` + + `${error instanceof Error ? error.message : String(error)}` + ); + return []; + } +} + +async function buildSessionContextSnapshot(params: SessionContextParams): Promise { + const isGitRepo = fs.existsSync(path.join(params.projectPath, '.git')); + let gitBranch: string | null = null; + if (isGitRepo) { + try { + const headPath = path.join(params.projectPath, '.git', 'HEAD'); + const headContent = fs.readFileSync(headPath, 'utf8').trim(); + if (headContent.startsWith('ref: refs/heads/')) { + gitBranch = headContent.replace('ref: refs/heads/', ''); + } else if (/^[0-9a-f]{40}$/i.test(headContent)) { + gitBranch = `DETACHED@${headContent.substring(0, 7)}`; + } + } catch (error) { + logDebug( + `[Prompt] Failed to resolve git branch from HEAD for project ${params.projectPath}: ` + + `${error instanceof Error ? error.message : String(error)}` + ); + } + } + + const today = new Date().toISOString().split('T')[0]; + const runtimeVersion = params.runtimeVersion ?? 
await getRuntimeVersionFromPom(params.projectPath); + const runtimePaths = getRuntimePaths(params.projectPath); + const catalog = await getAvailableConnectorCatalog(params.projectPath); + + return { + snapshot: { + workingDirectory: params.projectPath, + isGitRepo, + gitBranch, + platform: process.platform, + osVersion: `${os.type()} ${os.release()}`, + today, + backend: describeBackend(params.loginMethod), + miRuntimeVersion: runtimeVersion || 'unknown', + miRuntimeHomePath: runtimePaths.runtimeHomePath, + miLogDirPath: runtimePaths.logDirPath, + miCarbonLogPath: runtimePaths.carbonLogPath, + miErrorLogPath: runtimePaths.errorLogPath, + miHttpAccessLogPath: runtimePaths.httpAccessLogPath, + miServiceLogPath: runtimePaths.serviceLogPath, + miCorrelationLogPath: runtimePaths.correlationLogPath, + connectorArtifactIds: catalog.connectorArtifactIds.join(', '), + inboundArtifactIds: catalog.inboundArtifactIds.join(', '), + bundledInboundIds: catalog.bundledInboundIds.join(', '), + webSearchUnavailable: params.webSearchUnavailable === true, + mode: params.mode || 'edit', + tryoutPayloads: scanTryoutPayloads(params.projectPath), + }, + catalogWarnings: catalog.warnings, + catalogStoreStatus: catalog.storeStatus, + runtimeVersionResolved: runtimeVersion ?? null, + }; +} + +/** + * Per-block tracking values derived from a snapshot. agent.ts compares each + * value against the previous value stored on `SessionMetadata.sessionContextBlocks` + * to decide which blocks to re-inject this turn. 
+ */ +function deriveBlockHashes(snapshot: SessionContextSnapshot): SessionContextBlockHashes { + return { + env: hashJson({ + workingDirectory: snapshot.workingDirectory, + isGitRepo: snapshot.isGitRepo, + gitBranch: snapshot.gitBranch, + platform: snapshot.platform, + osVersion: snapshot.osVersion, + today: snapshot.today, + backend: snapshot.backend, + miRuntimeVersion: snapshot.miRuntimeVersion, + miRuntimeHomePath: snapshot.miRuntimeHomePath, + miLogDirPath: snapshot.miLogDirPath, + miCarbonLogPath: snapshot.miCarbonLogPath, + miErrorLogPath: snapshot.miErrorLogPath, + miHttpAccessLogPath: snapshot.miHttpAccessLogPath, + miServiceLogPath: snapshot.miServiceLogPath, + miCorrelationLogPath: snapshot.miCorrelationLogPath, + }), + connectors: hashJson({ + connectorArtifactIds: snapshot.connectorArtifactIds, + inboundArtifactIds: snapshot.inboundArtifactIds, + bundledInboundIds: snapshot.bundledInboundIds, + }), + webAvailability: hashJson({ webSearchUnavailable: snapshot.webSearchUnavailable }), + modePolicy: snapshot.mode, + // Hash over the file listing — adding/removing/modifying any + // .tryout/*.json flips the block hash and triggers a re-injection. + // `undefined` when the folder is empty so 'cleared' fires correctly + // when the user wipes all saved payloads. + payloads: snapshot.tryoutPayloads.length > 0 ? hashJson(snapshot.tryoutPayloads) : undefined, + }; +} + +/** + * Bundles the per-block hashes (used by agent.ts for change detection) with + * the snapshot + catalog metadata they were derived from. Letting agent.ts + * pass this back into `getUserPrompt` via `precomputedContext` avoids + * `buildSessionContextSnapshot` running twice per turn (it touches .git/HEAD, + * pom.xml, runtime paths, and the connector store catalog). 
+ */ +export interface SessionContextBuildResult { + hashes: SessionContextBlockHashes; + snapshot: SessionContextSnapshot; + catalogWarnings: string[]; + catalogStoreStatus: string; + runtimeVersionResolved: string | null; +} + +/** + * Build the per-turn session context: snapshot + per-block hashes + catalog + * metadata. Agent.ts uses `result.hashes` for the block-status decisions and + * passes the whole result back through `UserPromptParams.precomputedContext` + * so `getUserPrompt` reuses the same snapshot. + */ +export async function computeSessionContextBlockHashes(params: SessionContextParams): Promise { + const built = await buildSessionContextSnapshot(params); + return { + hashes: deriveBlockHashes(built.snapshot), + snapshot: built.snapshot, + catalogWarnings: built.catalogWarnings, + catalogStoreStatus: built.catalogStoreStatus, + runtimeVersionResolved: built.runtimeVersionResolved, + }; +} + +// ============================================================================ +// Tracked-block text builders +// ============================================================================ + +const CONTEXT_UPDATED = '[context updated]'; + +const DEFAULT_BLOCK_STATUSES: BlockInjectionStatuses = { + env: 'first-injection', + connectors: 'first-injection', + webAvailability: 'first-injection', + modePolicy: 'first-injection', + payloads: 'first-injection', +}; + +function buildEnvBlockText(snapshot: SessionContextSnapshot, status: BlockInjectionStatus): string | undefined { + // 'cleared' is unreachable for env (always-defined hash) but treat it as + // omit defensively so adding new statuses can't silently render junk. + if (status === 'omit' || status === 'cleared') { + return undefined; + } + const headerSuffix = status === 're-injection' ? ` ${CONTEXT_UPDATED}` : ''; + const lines: string[] = [ + `# Environment${headerSuffix}`, + `Working directory: ${snapshot.workingDirectory}`, + `Is directory a git repo: ${snapshot.isGitRepo ? 
'true' : 'false'}`, + ]; + if (snapshot.gitBranch) { + lines.push(`Current git branch: ${snapshot.gitBranch}`); + } + lines.push( + `Platform: ${snapshot.platform}`, + `OS Version: ${snapshot.osVersion}`, + `Today's date: ${snapshot.today}`, + `Copilot backend: ${snapshot.backend}`, + `MI Runtime version: ${snapshot.miRuntimeVersion}`, + `MI Runtime home path: ${snapshot.miRuntimeHomePath}`, + `MI Runtime log directory: ${snapshot.miLogDirPath}`, + `MI Runtime logs:`, + ` - wso2carbon.log (main): ${snapshot.miCarbonLogPath}`, + ` - wso2error.log (errors + stack traces): ${snapshot.miErrorLogPath}`, + ` - http_access.log (HTTP requests): ${snapshot.miHttpAccessLogPath}`, + ` - wso2-mi-service.log (service lifecycle): ${snapshot.miServiceLogPath}`, + ` - correlation.log (request tracing): ${snapshot.miCorrelationLogPath}`, + ); + return lines.join('\n'); +} + +function buildConnectorsBlockText(snapshot: SessionContextSnapshot, status: BlockInjectionStatus): string | undefined { + if (status === 'omit' || status === 'cleared') { + return undefined; + } + const prefix = status === 're-injection' ? `${CONTEXT_UPDATED}\n` : ''; + return `${prefix}Available WSO2 connector artifact ids for this version of the MI runtime (from the connector store — pass these to get_connector_info / add_or_remove_connector): +${snapshot.connectorArtifactIds} + +Available downloadable inbound artifact ids for this version of the MI runtime (from the connector store — add via add_or_remove_connector): +${snapshot.inboundArtifactIds} + +Available bundled inbound ids (shipped with this version of the MI runtime — use the id directly with get_connector_info, do NOT add to pom.xml): +${snapshot.bundledInboundIds}`; +} + +/** + * Web-availability block: only renders the "not available" warning when + * Bedrock has no Tavily key. 
When the flag flips to "available", the block + * disappears — the model sees the prior turn's warning fade out of relevance, + * and the underlying tools start succeeding. Asymmetric but matches today's + * semantics; the `[context updated]` notice fires when the warning re-appears. + */ +function buildWebAvailabilityBlockText(snapshot: SessionContextSnapshot, status: BlockInjectionStatus): string | undefined { + if (status === 'omit' || status === 'cleared') { + return undefined; + } + if (!snapshot.webSearchUnavailable) { + return undefined; + } + const prefix = status === 're-injection' ? `${CONTEXT_UPDATED}\n` : ''; + return `${prefix}Web search is not available in this environment because no Tavily API key is configured (AWS Bedrock has no first-party web tools). Do NOT call web_search or web_fetch — they will fail with WEB_SEARCH_NOT_CONFIGURED / WEB_FETCH_NOT_CONFIGURED. Override the system prompt's research-priority guidance: skip step (3) entirely. If the user asks for external/web information, tell them to add a Tavily API key in the AI Panel settings (Web Search section) to enable it. For Synapse/MI internals continue to use load_context_reference and deepwiki_ask_question as the system prompt instructs.`; +} + +/** + * Full mode-policy block. For Ask/Edit, always renders (status is ignored — + * their policies are short, gating saves nothing). For Plan, gated by status: + * 'omit' skips entirely (relies on the always-rendered brief Plan note for + * the highest-stakes rules). + * + * The "[mode changed from PREV]" notice is rendered separately on the mode- + * header line by the template — works for every mode transition, not just + * entering Plan, so the model always knows when the mode flipped. 
+ */ +function buildFullModePolicyBlockText( + mode: AgentMode, + fullPolicy: string, + status: BlockInjectionStatus, +): string | undefined { + if (mode !== 'plan') { + return fullPolicy.trim(); + } + if (status === 'omit' || status === 'cleared') { + return undefined; + } + return fullPolicy.trim(); +} + +/** + * Render the tryout payloads block. We surface only a *listing* of + * `.tryout/*.json` files (not their contents) — the model reads the relevant + * file on demand via `file_read`. See system.ts "Tryout payloads" section + * for file-format details and read-on-demand guidance. + */ +function buildPayloadsBlockText( + files: TryoutPayloadEntry[], + status: BlockInjectionStatus, +): string | undefined { + if (status === 'cleared') { + // Listing was non-empty on a prior turn but `.tryout/` is now empty — + // model still has the stale listing in context. + return `# Tryout payloads [removed] +The .tryout/ folder no longer contains saved sample request payloads. Discard any prior payload references and do not read .tryout/*.json until new ones are saved.`; + } + if (status === 'omit' || files.length === 0) { + return undefined; + } + const headerSuffix = status === 're-injection' ? ` ${CONTEXT_UPDATED}` : ''; + const list = files.map(f => ` - .tryout/${f.name}`).join('\n'); + return `# Tryout payloads${headerSuffix} +The user has saved sample request payloads in .tryout/ (one file per artifact). Use file_read on the relevant file ONLY when you need to reason about runtime inputs (expression mapping, body shape, query/path params, field names) — do not read otherwise. See the system prompt's "Tryout payloads" section for the file format and how to pick the default request. 
+${list}`; +} + // ============================================================================ // User Prompt Generation // ============================================================================ @@ -361,77 +830,66 @@ export async function getUserPrompt(params: UserPromptParams): Promise 0 - ? `Connector store status: ${connectorCatalog.storeStatus}. ${connectorCatalog.warnings.join(' ')}` + const connectorStoreReminder = sessionContext.catalogWarnings.length > 0 + ? `Connector store status: ${sessionContext.catalogStoreStatus}. ${sessionContext.catalogWarnings.join(' ')}` : ''; - // Prepare template context - const isGitRepo = fs.existsSync(path.join(params.projectPath, '.git')); - let gitBranch: string | null = null; - if (isGitRepo) { - try { - const headPath = path.join(params.projectPath, '.git', 'HEAD'); - const headContent = fs.readFileSync(headPath, 'utf8').trim(); - if (headContent.startsWith('ref: refs/heads/')) { - gitBranch = headContent.replace('ref: refs/heads/', ''); - } else if (/^[0-9a-f]{40}$/i.test(headContent)) { - gitBranch = `DETACHED@${headContent.substring(0, 7)}`; - } - } catch (error) { - logDebug( - `[Prompt] Failed to resolve git branch from HEAD for project ${params.projectPath}: ` + - `${error instanceof Error ? error.message : String(error)}` - ); - } - } - const today = new Date().toISOString().split('T')[0]; - const runtimeVersion = params.runtimeVersion ?? await getRuntimeVersionFromPom(params.projectPath); - const runtimeVersionDetected = params.runtimeVersionDetected ?? !!runtimeVersion; + const runtimeVersionDetected = params.runtimeVersionDetected ?? !!sessionContext.runtimeVersionResolved; const runtimeVersionDetectionWarning = runtimeVersionDetected ? '' : 'MI runtime version could not be detected. Code examples use modern syntax (MI >= 4.4.0). If your project uses an older MI runtime, specify it explicitly.'; - const runtimePaths = getRuntimePaths(params.projectPath); + + const blockStatuses = params.blockStatuses ?? 
DEFAULT_BLOCK_STATUSES; + + const envBlock = buildEnvBlockText(snapshot, blockStatuses.env); + const connectorsBlock = buildConnectorsBlockText(snapshot, blockStatuses.connectors); + const webAvailabilityBlock = buildWebAvailabilityBlockText(snapshot, blockStatuses.webAvailability); + const payloadsBlock = buildPayloadsBlockText(snapshot.tryoutPayloads, blockStatuses.payloads); + const fullModePolicyBlock = buildFullModePolicyBlockText( + mode, + fullModePolicy, + blockStatuses.modePolicy, + ); + // Render "[mode changed from PREV]" inline on the mode-header line for any + // mode transition (not just entering Plan), so the model always sees a + // diff when the active mode flipped — even for Ask/Edit where the full + // policy isn't gated. + const modeChangedFrom = blockStatuses.modePolicy === 're-injection' && params.previousMode + ? params.previousMode.toUpperCase() + : undefined; + const context: Record = { question: params.query, fileList: fileList, currentlyOpenedFile: currentlyOpenedFile, // Currently editing file (optional) - userPreconfigured: params.payloads, // Pre-configured payloads (optional) - payloads: params.payloads, // Backward-compatible template key - include_session_context: params.includeSessionContext ?? true, - available_connector_artifact_ids: availableConnectorArtifactIds.join(', '), - available_inbound_artifact_ids: availableInboundArtifactIds.join(', '), - available_bundled_inbound_ids: availableBundledInboundIds.join(', '), - env_working_directory: params.projectPath, - env_is_git_repo: isGitRepo ? 
'true' : 'false', - env_git_branch: gitBranch, - env_platform: process.platform, - env_os_version: `${os.type()} ${os.release()}`, - env_today: today, - env_mi_runtime_version: runtimeVersion || 'unknown', - env_mi_runtime_home_path: runtimePaths.runtimeHomePath, - env_mi_log_dir_path: runtimePaths.logDirPath, - env_mi_runtime_carbon_log_path: runtimePaths.carbonLogPath, - env_mi_error_log_path: runtimePaths.errorLogPath, - env_mi_http_access_log_path: runtimePaths.httpAccessLogPath, - env_mi_service_log_path: runtimePaths.serviceLogPath, - env_mi_correlation_log_path: runtimePaths.correlationLogPath, + env_block: envBlock, + connectors_block: connectorsBlock, + web_availability_block: webAvailabilityBlock, + payloads_block: payloadsBlock, + full_mode_policy_block: fullModePolicyBlock, runtime_version_detection_warning: runtimeVersionDetectionWarning, mode_upper: mode.toUpperCase(), - mode_policy: modePolicyReminder, + mode_brief_note: modeBriefNote, + mode_changed_from: modeChangedFrom, plan_file_reminder: planFileReminder, connector_store_reminder: connectorStoreReminder, }; diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/system.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/system.ts index 74ae58b8964..e242fb486dc 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/system.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/agents/main/system.ts @@ -24,6 +24,7 @@ import { MANAGE_CONNECTOR_TOOL_NAME, VALIDATE_CODE_TOOL_NAME, CREATE_DATA_MAPPER_TOOL_NAME, + GENERATE_DATA_MAPPING_TOOL_NAME, SUBAGENT_TOOL_NAME, ASK_USER_TOOL_NAME, ENTER_PLAN_MODE_TOOL_NAME, @@ -57,7 +58,10 @@ You are WSO2 Integrator Copilot, an expert AI agent embedded in the VSCode-based You help developers design, build, edit, and debug WSO2 Synapse integrations using the tools provided. 
# Thinking behavior -Extended thinking ( if enabled ) adds latency and should only be used when it will meaningfully improve answer quality — typically for problems that require multi-step reasoning. When in doubt, respond directly. More importantly "Do not Overthink". +- Adaptive thinking is on by default (low effort) and adds latency on every turn it fires. The most common failure is trying to reason through every Synapse detail upfront — Synapse has runtime quirks and connector behaviours not visible from source/docs alone, so long pre-flight thinking on Synapse problems is wasted time and frustrates the user. +- Correct loop: build a **rough** mental model → implement → refine using the feedback signals available (inline LS diagnostics, server logs, reference lookups, deepwiki). When a signal is one tool call away, don't think instead of fetching it. Same applies to debugging — don't enumerate every possible cause in your head; get one signal first, then narrow. +- Use thinking for closed-form reasoning that doesn't depend on Synapse-specific knowledge (data-mapper TypeScript logic, control-flow design, synthesizing prior tool output). Skip it for "what is the right Synapse XML / mediator / expression / connector op for X" — that's answered by tools. +- Treat any Synapse-specific conclusion you reach by thinking as a **hypothesis, not a fact**, regardless of how confident you feel. Verify via ${CONTEXT_TOOL_NAME} or ${DEEPWIKI_ASK_QUESTION_TOOL_NAME} before writing — your training data on Synapse is incomplete and often wrong, and thinking does not produce new knowledge. Thinking helps you plan WHAT to look up, not skip the lookup. # Tone and style - Only use emojis if the user explicitly requests it. 
@@ -138,6 +142,11 @@ ${Object.entries(DEFERRED_TOOL_DESCRIPTIONS).map(([name, desc]) => `- ${name}: $ ## Background tasks - Background tasks from ${BASH_TOOL_NAME} and ${SUBAGENT_TOOL_NAME} share the same task_id workflow: ${TASK_OUTPUT_TOOL_NAME} to check output, ${KILL_TASK_TOOL_NAME} to terminate. +## Tryout payloads (\`.tryout/*.json\`) +- User-saved sample requests, one file per artifact. Per-turn user reminder lists which exist — do not pre-load. +- Read on demand only when reasoning about runtime inputs (body/header/query/path field names, expression mapping). Otherwise ignore. +- Format: APIs nest requests under \`"/"\` keys; other artifacts are flat. Pick the request whose \`name\` equals \`defaultRequest\`. + ## Connectors and inbound endpoints (${CONNECTOR_TOOL_NAME}, ${MANAGE_CONNECTOR_TOOL_NAME}) - Workflow: mode='summary' to learn operations / init style → mode='details' for the specific ops/connections you will actually use → write XML → ${MANAGE_CONNECTOR_TOOL_NAME} to add the artifact to the project. - Bundled inbound ids (http, jms, ...) skip ${MANAGE_CONNECTOR_TOOL_NAME} — reference them straight from Synapse XML. @@ -147,7 +156,7 @@ ${Object.entries(DEFERRED_TOOL_DESCRIPTIONS).map(([name, desc]) => `- ${name}: $ ## Web tools - ${WEB_SEARCH_TOOL_NAME}: external research. Prefer MI docs (allowed_domains=["mi.docs.wso2.com"]), also use GitHub issues, Stack Overflow when useful. -- ${WEB_FETCH_TOOL_NAME}: fetch URL content (not JS-rendered sites; MI docs is JS-rendered, so use ${WEB_SEARCH_TOOL_NAME} for those). Both require user approval. +- ${WEB_FETCH_TOOL_NAME}: fetch URL content (not JS-rendered sites; MI docs is JS-rendered, so use ${WEB_SEARCH_TOOL_NAME} for those). ## DeepWiki by Cognition.ai/Devin (${DEEPWIKI_ASK_QUESTION_TOOL_NAME}) - DeepWiki (deepwiki.com) indexes GitHub repos and provides AI-powered Q&A grounded in source code. 
Use for MI/Synapse internals, source-level behavior, and implementation details not covered by built-in context. @@ -155,6 +164,14 @@ ${Object.entries(DEFERRED_TOOL_DESCRIPTIONS).map(([name, desc]) => `- ${name}: $ - **Connector repos**: Under \`wso2-extensions/\` org. Use the \`repoName\` field from ${CONNECTOR_TOOL_NAME} output (e.g., \`wso2-extensions/mi-connector-redis\`, \`wso2-extensions/esb-connector-amazons3\`). - Query multiple repos at once by passing an array. Ask specific technical questions, not vague ones. +# Copilot backends +You can run on three different authentication backends. The active one for this session is in \`<env>\` under "Copilot backend". The only practical difference you should reason about is the web tools: +- **WSO2 Integrator Copilot Proxy (MI_INTEL, SSO via WSO2 Devant)** — quota-limited free tier. ${WEB_SEARCH_TOOL_NAME} / ${WEB_FETCH_TOOL_NAME} are Anthropic's first-party server tools (live citations, no extra round-trip). +- **Anthropic Direct (ANTHROPIC_KEY, BYOK)** — user pays Anthropic directly. Same Anthropic server-side ${WEB_SEARCH_TOOL_NAME} / ${WEB_FETCH_TOOL_NAME} as Proxy. +- **AWS Bedrock (AWS_BEDROCK)** — user pays AWS. Bedrock has no first-party web tools, so ${WEB_SEARCH_TOOL_NAME} / ${WEB_FETCH_TOOL_NAME} are a Tavily-backed wrapper *only when a Tavily API key is configured*. Without a key the tools fail with WEB_SEARCH_NOT_CONFIGURED / WEB_FETCH_NOT_CONFIGURED — a \`<system_reminder>\` will tell you when this is the case. + +Other tools (file ops, connectors, LSP, build/deploy, server management, deepwiki, shell) behave identically across all three backends — do NOT branch behaviour on the backend for anything other than web tools. + # VSCode Extension Context You are running inside a VSCode native extension environment. @@ -177,7 +194,7 @@ The user's IDE selection (if any) is included in the conversation context and ma - If a missing detail can change architecture, security, or external dependencies, ask via ${ASK_USER_TOOL_NAME}. 
Otherwise, make minimal assumptions and state them briefly. ## Design Guidelines -- Plan before implementing: identify required artifacts (APIs, sequences, endpoints, etc.) and connectors/mediators. +- Sketch the artifact list (APIs, sequences, endpoints, connectors/mediators) before writing — enough to know what you'll create, not a full design. Refine as you implement, per the loop in "Thinking behavior". ## Context Guidelines - Always read a file before editing it. Do not propose changes to files that you haven't seen. @@ -282,6 +299,7 @@ Proactively load reference contexts when you need deeper knowledge beyond ; /** Function to get Anthropic client for task tool */ getAnthropicClient: (model: AnthropicModel) => Promise; - /** Skip per-call web approval prompts for this run */ - webAccessPreapproved: boolean; + /** + * Which web-tool implementation to register. + * - `anthropic-server`: native Anthropic `web_search` / `web_fetch` server tools (MI_INTEL Proxy + ANTHROPIC_KEY) + * - `tavily-local`: Tavily-backed local tools (AWS Bedrock with a Tavily key) + * - `none`: stubbed tools that return WEB_SEARCH/FETCH_NOT_CONFIGURED (Bedrock without a Tavily key) + */ + webToolsProvider: WebToolsProvider; + /** Required when webToolsProvider === 'anthropic-server'. Resolved upstream in executeAgent. */ + anthropicProvider?: AnthropicProvider; + /** Required when webToolsProvider === 'tavily-local'. 
*/ + tavilyApiKey?: string; /** Session-scoped shell approval rule store */ shellApprovalRuleStore?: ShellApprovalRuleStore; /** Optional undo checkpoint manager for capturing pre-change states */ @@ -561,17 +573,15 @@ export function createAgentTools(params: CreateToolsParams) { pendingQuestions, pendingApprovals, getAnthropicClient, - webAccessPreapproved, + webToolsProvider, + anthropicProvider, + tavilyApiKey, shellApprovalRuleStore, undoCheckpointManager, abortSignal, modelSettings, } = params; - // Resolve the main model ID for tools that need it (web search/fetch) - const mainModelId = modelSettings ? resolveMainModelId(modelSettings) : undefined; - const mainModelIsCustom = !!modelSettings?.mainModelCustomId; - const getWrappedExecute = Promise>( toolName: string, execute: T, @@ -591,6 +601,37 @@ export function createAgentTools(params: CreateToolsParams) { // Shared set tracking files read in this session (for write tool's read-before-write guard) const readFiles = new Set(); + const buildWebTools = (): Record => { + if (webToolsProvider === 'anthropic-server') { + if (!anthropicProvider) { + throw new Error("createAgentTools: webToolsProvider='anthropic-server' requires anthropicProvider."); + } + return createAnthropicServerWebTools(anthropicProvider); + } + if (webToolsProvider === 'tavily-local' && tavilyApiKey) { + return { + [WEB_SEARCH_TOOL_NAME]: createWebSearchTool( + getWrappedExecute(WEB_SEARCH_TOOL_NAME, createWebSearchExecute(tavilyApiKey)) + ), + [WEB_FETCH_TOOL_NAME]: createWebFetchTool( + getWrappedExecute(WEB_FETCH_TOOL_NAME, createWebFetchExecute(tavilyApiKey)) + ), + }; + } + // 'none' (or the unreachable tavily-local-without-key fallback): register stubs + // that surface a clear NOT_CONFIGURED error if the model ignores the + // `web_search_unavailable` system reminder and calls them anyway. + const notConfigured = (kind: 'search' | 'fetch') => async (): Promise => ({ + success: false, + message: `Web ${kind} is not configured. 
Add a Tavily API key in the AI Panel settings (Web Search section) to enable it on AWS Bedrock.`, + error: kind === 'search' ? 'WEB_SEARCH_NOT_CONFIGURED' : 'WEB_FETCH_NOT_CONFIGURED', + }); + return { + [WEB_SEARCH_TOOL_NAME]: createWebSearchTool(getWrappedExecute(WEB_SEARCH_TOOL_NAME, notConfigured('search'))), + [WEB_FETCH_TOOL_NAME]: createWebFetchTool(getWrappedExecute(WEB_FETCH_TOOL_NAME, notConfigured('fetch'))), + }; + }; + const allTools = { // File Operations (6 tools) [FILE_WRITE_TOOL_NAME]: createWriteTool( @@ -667,31 +708,8 @@ export function createAgentTools(params: CreateToolsParams) { getWrappedExecute(TODO_WRITE_TOOL_NAME, createTodoWriteExecute(eventHandler)) ), - // Web Tools (2 tools) - [WEB_SEARCH_TOOL_NAME]: createWebSearchTool( - getWrappedExecute(WEB_SEARCH_TOOL_NAME, createWebSearchExecute( - getAnthropicClient, - eventHandler, - pendingApprovals, - webAccessPreapproved, - sessionId, - mainModelId, - mainModelIsCustom, - abortSignal - )) - ), - [WEB_FETCH_TOOL_NAME]: createWebFetchTool( - getWrappedExecute(WEB_FETCH_TOOL_NAME, createWebFetchExecute( - getAnthropicClient, - eventHandler, - pendingApprovals, - webAccessPreapproved, - sessionId, - mainModelId, - mainModelIsCustom, - abortSignal - )) - ), + // Web Tools (2 tools) — branched by webToolsProvider, see CreateToolsParams + ...buildWebTools(), [DEEPWIKI_ASK_QUESTION_TOOL_NAME]: createDeepWikiTool( getWrappedExecute(DEEPWIKI_ASK_QUESTION_TOOL_NAME, createDeepWikiExecute(abortSignal)) ), diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/chat-history-manager.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/chat-history-manager.ts index 8826b534e73..8512df3915d 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/chat-history-manager.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/chat-history-manager.ts @@ -24,6 +24,7 @@ import { v4 as uuidv4 } from 'uuid'; import { logDebug, logError, logInfo } from '../copilot/logger'; import { 
getToolAction, capitalizeAction } from './tool-action-mapper'; import { BASH_TOOL_NAME } from './tools/types'; +import { stripAnsiAndControl } from '../utils/sanitize-text'; import { AgentMode, CheckpointAnchorSummary, @@ -54,6 +55,29 @@ export interface SessionMetadata { * Used to skip loading unsupported sessions after breaking storage changes. */ sessionVersion?: number; + /** + * Per-block tracking state for the user-prompt session-context blocks. + * The agent re-injects only the blocks whose stored value drifts since + * the last turn. Persisted so the check survives extension restarts. + */ + sessionContextBlocks?: SessionContextBlocksState; +} + +/** + * Tracking state for each session-context block. Absent fields mean "block + * has never been injected" (treated as a first injection on the next turn). + * + * Re-exported (via duplicate definition) from `@wso2/mi-core` to avoid a + * dev-loop rebuild dependency on mi-core when this type changes — same pattern + * as `SessionMetadata` above. + */ +export interface SessionContextBlocksState { + env?: string; + connectors?: string; + webAvailability?: string; + /** Verbatim mode name (`"ask" | "edit" | "plan"`) for "[mode changed from EDIT]" notices. */ + modePolicy?: string; + payloads?: string; } export const TOOL_USE_INTERRUPTION_CONTEXT = `The user interrupted while a tool was running. The tool use was rejected and any pending mutations were NOT applied. Stop immediately and wait for the user's next message.`; @@ -913,6 +937,46 @@ export class ChatHistoryManager { includeUndoCheckpointEntry?: boolean; includeCheckpointAnchorEntry?: boolean; }): Promise { + // Sanitize tool-result / text content blocks on load. Older sessions + // may have persisted raw control bytes (e.g. ANSI codes from a Maven + // `build.txt` read) that pass JSON.parse fine but trip the Copilot + // proxy's stricter validator on resend. 
Walking the message structure + // is cheap (each session has at most a few hundred entries) and lets + // existing sessions keep working without a manual edit. + const sanitizeContentBlock = (block: any): any => { + if (!block || typeof block !== 'object') { + return block; + } + if (block.type === 'text' && typeof block.text === 'string') { + return { ...block, text: stripAnsiAndControl(block.text) }; + } + if (block.type === 'tool-result' && block.output && typeof block.output === 'object') { + const out = block.output; + if (typeof out.value === 'string') { + return { ...block, output: { ...out, value: stripAnsiAndControl(out.value) } }; + } + if (Array.isArray(out.value)) { + return { + ...block, + output: { + ...out, + value: out.value.map((v: any) => + v && typeof v === 'object' && typeof v.text === 'string' + ? { ...v, text: stripAnsiAndControl(v.text) } + : v + ), + }, + }; + } + } + return block; + }; + const sanitizeMessage = (message: any): any => { + if (!message || typeof message !== 'object' || !Array.isArray(message.content)) { + return message; + } + return { ...message, content: message.content.map(sanitizeContentBlock) }; + }; try { const includeCompactSummaryEntry = options?.includeCompactSummaryEntry === true; const includeUndoCheckpointEntry = options?.includeUndoCheckpointEntry === true; @@ -955,7 +1019,7 @@ export class ChatHistoryManager { for (let i = lastCompactIndex + 1; i < allEntries.length; i++) { const entry = allEntries[i]; if (entry.type === 'user' || entry.type === 'assistant' || entry.type === 'tool') { - let modelMessage = entry.message; + let modelMessage = sanitizeMessage(entry.message); if (entry.chatId !== undefined && modelMessage && typeof modelMessage === 'object') { modelMessage = { ...modelMessage, @@ -984,7 +1048,7 @@ export class ChatHistoryManager { const messages: any[] = []; for (const entry of allEntries) { if (entry.type === 'user' || entry.type === 'assistant' || entry.type === 'tool') { - let modelMessage = 
entry.message; + let modelMessage = sanitizeMessage(entry.message); if (entry.chatId !== undefined && modelMessage && typeof modelMessage === 'object') { modelMessage = { ...modelMessage, diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/data_mapper_reference.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/data_mapper_reference.ts new file mode 100644 index 00000000000..072bf12927c --- /dev/null +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/data_mapper_reference.ts @@ -0,0 +1,252 @@ +/** + * Copyright (c) 2026, WSO2 LLC. (https://www.wso2.com/) All Rights Reserved. + * + * WSO2 LLC. licenses this file to you under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * WSO2 MI Data Mapper Reference + * Shared reference for the .ts mapping file format, dmUtils helper API, and + * TypeScript pitfalls (TS2556 dynamic-array spread). + * + * Loaded by: + * - The data mapper sub-agent (via DATA_MAPPER_SYSTEM_TEMPLATE) for generation + * - The main agent on demand via load_context_reference("data-mapper-reference") + * when editing existing .ts mapping files without going through the + * generate_data_mapping tool. + * + * Section-based exports for granular context loading. + * Usage: DATA_MAPPER_REFERENCE_SECTIONS["dynamic_arrays"] for the TS2556 spread rule. + * DATA_MAPPER_REFERENCE_FULL for the entire reference. 
+ */ + +export const DATA_MAPPER_REFERENCE_SECTIONS: Record = { + +overview: `## Data Mapper Overview + +Data mappers transform data between input and output schemas using TypeScript. They pair with the \`\` mediator in Synapse integrations. + +**Runtime requirement:** \`\` and data mapper artifacts require MI runtime \`4.4.0\` or newer. On older runtimes, fall back to PayloadFactory / Enrich / XSLT. + +**Folder Structure:** +Each data mapper lives at \`src/main/wso2mi/resources/datamapper/{name}/\` containing: +- \`{name}.ts\` — TypeScript mapping file with input/output interfaces and \`mapFunction\` +- \`dm-utils.ts\` — Helper functions (arithmetic, string, type conversion); imported as \`dmUtils\` + +**TypeScript Mapping File Skeleton:** +\`\`\`typescript +import * as dmUtils from "./dm-utils"; +declare var DM_PROPERTIES: any; + +/** + * inputType:JSON + * title:"InputSchemaName" + */ +interface InputRoot { + // Input schema fields +} + +/** + * outputType:JSON + * title:"OutputSchemaName" + */ +interface OutputRoot { + // Output schema fields +} + +export function mapFunction(input: InputRoot): OutputRoot { + return { + // Field mappings: outputField: input.inputField + }; +} +\`\`\` + +**Using Data Mapper in Synapse XML:** +\`\`\`xml + +\`\`\``, + +typescript_rules: `## Critical TypeScript Rules + +- **Use explicit returns in arrow functions:** + \`\`\`typescript + // ✅ Correct + input.items.map(item => { return { id: item.id, qty: item.qty }; }) + // ❌ Wrong — concise object-literal arrow without braces breaks the data mapper compiler + input.items.map(item => ({ id: item.id, qty: item.qty })) + \`\`\` +- **Preserve exact field names from schemas.** For fields containing spaces, hyphens, or other special characters, enclose them in quotes: + \`\`\`typescript + return { "first-name": input.firstName, "Order Total": input.total }; + \`\`\` +- **Don't re-import dmUtils.** The file already imports it: \`import * as dmUtils from "./dm-utils";\` — leave that line 
untouched. +- **Don't redeclare \`DM_PROPERTIES\`.** It's declared once at the top of the file and is available globally inside \`mapFunction\` for accessing \`\` mediator values via \`dmUtils.getPropertyValue\`.`, + +dmutils_functions: `## dmUtils Helper Functions + +The \`dmUtils\` module exposes the following helpers. **Use these instead of raw JavaScript operators when appropriate** for clarity and consistency. + +**Arithmetic Operations:** +- \`dmUtils.sum(num1, ...nums)\` — Sum a fixed list of numbers (requires at least one positional argument) + Example: \`dmUtils.sum(item.price, item.tax, item.shipping)\` +- \`dmUtils.average(num1, ...nums)\` — Average a fixed list of numbers (requires at least one positional argument) + Example: \`dmUtils.average(input.score1, input.score2, input.score3)\` +- \`dmUtils.max(num1, ...nums)\` — Find maximum value +- \`dmUtils.min(num1, ...nums)\` — Find minimum value +- \`dmUtils.ceiling(num)\` — Round up to nearest integer +- \`dmUtils.floor(num)\` — Round down to nearest integer +- \`dmUtils.round(num)\` — Round to nearest integer + +**Type Conversions:** +- \`dmUtils.toNumber(str)\` — Convert string to number + Example: \`dmUtils.toNumber(input.quantity)\` +- \`dmUtils.toBoolean(str)\` — Convert string to boolean ("true" → true) +- \`dmUtils.numberToString(num)\` — Convert number to string +- \`dmUtils.booleanToString(bool)\` — Convert boolean to string + +**String Operations:** +- \`dmUtils.concat(str1, ...strs)\` — Concatenate multiple strings + Example: \`dmUtils.concat(input.firstName, " ", input.lastName)\` +- \`dmUtils.split(str, separator)\` — Split string into array + Example: \`dmUtils.split(input.fullName, " ")\` +- \`dmUtils.toUppercase(str)\` — Convert to uppercase +- \`dmUtils.toLowercase(str)\` — Convert to lowercase +- \`dmUtils.stringLength(str)\` — Get string length +- \`dmUtils.startsWith(str, prefix)\` — Check if string starts with prefix +- \`dmUtils.endsWith(str, suffix)\` — Check if string ends with 
suffix +- \`dmUtils.substring(str, start, end)\` — Extract substring +- \`dmUtils.trim(str)\` — Remove leading/trailing whitespace +- \`dmUtils.replaceFirst(str, target, replacement)\` — Replace first occurrence +- \`dmUtils.match(str, regex)\` — Test if string matches regex pattern + +**Property Access:** +- \`dmUtils.getPropertyValue(scope, name)\` — Read a Synapse property by scope/name (e.g. \`dmUtils.getPropertyValue("default", "user.id")\`)`, + +dynamic_arrays: `## Aggregating Dynamic Arrays (CRITICAL — TS2556) + +\`dmUtils.sum\`/\`average\`/\`max\`/\`min\` are typed as \`(num1: number, ...rest: number[])\` and require at least one positional argument. **Spreading a dynamically-sized array into them fails TypeScript compilation** with: + +\`\`\` +error TS2556: A spread argument must either have a tuple type or be passed to a rest parameter. +\`\`\` + +This is the canonical mistake — keep it out of generated mappings. + +- ❌ **Do NOT** spread arrays of unknown length: + \`\`\`typescript + const totalAmount = dmUtils.sum(...lineItems.map(i => i.lineTotal)); // TS2556 + \`\`\` +- ❌ **Do NOT** spread \`.map()\` / \`.filter()\` results into dmUtils aggregation functions. +- ✅ **For totalling/averaging across an array, use \`reduce\`:** + \`\`\`typescript + // Sum + const totalAmount = input.lineItems.reduce((acc, item) => acc + item.lineTotal, 0); + + // Average (guard the empty-array case) + const avgScore = input.scores.length === 0 + ? 0 + : input.scores.reduce((acc, n) => acc + n, 0) / input.scores.length; + + // Max / min + const maxPrice = input.items.reduce((acc, item) => Math.max(acc, item.price), -Infinity); + const minPrice = input.items.reduce((acc, item) => Math.min(acc, item.price), Infinity); + \`\`\` +- ✅ **Use \`dmUtils.sum\`/\`average\` only when summing a known, fixed set of fields**, e.g. \`dmUtils.sum(input.subtotal, input.tax, input.shipping)\`. 
+ +**Workaround (if dmUtils.sum must be used over an array):** +\`\`\`typescript +const totals = lineItems.map(item => item.lineTotal); +const totalAmount = totals.length === 0 ? 0 : dmUtils.sum(totals[0], ...totals.slice(1)); +\`\`\` +This satisfies the rest-parameter requirement but is harder to read than \`reduce\` — prefer \`reduce\`.`, + +when_to_use_dmutils: `## When to Use dmUtils vs Raw Operators + +| Operation | Use dmUtils when… | Use raw operators when… | +|-----------|-------------------|-------------------------| +| String concat | Joining 2+ strings | (always prefer \`dmUtils.concat\`) | +| Sum / average | A **fixed** set of fields (\`subtotal + tax + shipping\`) | Aggregating a **dynamic array** — use \`reduce\` | +| Max / min | A fixed set of fields | A dynamic array — use \`reduce\` with \`Math.max\`/\`Math.min\` | +| Type conversion | Always (\`toNumber\`, \`toBoolean\`, \`numberToString\`, \`booleanToString\`) | (n/a) | +| String transforms | Always (\`toUppercase\`, \`trim\`, \`substring\`, etc.) | (n/a) | + +**Rule of thumb:** Prefer dmUtils for clarity and consistency, but fall back to native \`reduce\` / \`map\` / \`filter\` when the input is a dynamically-sized array. Never spread an array into \`dmUtils.sum/average/max/min\`.`, + +array_handling: `## Array Handling Patterns + +**Input array → output single object:** Pick a representative element. +\`\`\`typescript +// First element +primaryItem: input.items[0] +// Conditional pick +primaryAddress: input.addresses.find(addr => addr.type === "billing") +\`\`\` + +**Input array → output array:** Use \`.map()\` with explicit returns. 
+\`\`\`typescript +items: input.orders.map(order => { + return { + id: order.orderId, + total: dmUtils.sum(order.subtotal, order.tax), + itemCount: order.items.length + }; +}) +\`\`\` + +**Input single object → output array (wrap):** +\`\`\`typescript +items: [{ id: input.id, name: input.name }] +\`\`\` + +**Input array → output count / aggregate:** +\`\`\`typescript +itemCount: input.lineItems.length, +totalAmount: input.lineItems.reduce((acc, item) => acc + item.lineTotal, 0) +\`\`\` + +**Filter then map:** +\`\`\`typescript +activeUsers: input.users + .filter(u => u.status === "active") + .map(u => { + return { id: u.id, name: u.name }; + }) +\`\`\``, + +tool_usage: `## Generating and Editing Mappings — Tool Guidance + +The agent has dedicated tools for data mapper work. Prefer them over hand-writing mapping files unless the user has explicitly asked you to edit an existing one. + +- **\`create_data_mapper\`** — Use this to create a new data mapper. It scaffolds the folder, the \`.ts\` file with empty interfaces, and the \`dm-utils.ts\` helper module. Do NOT create these files manually with \`file_write\`. +- **\`generate_data_mapping\`** — Use this to fill in the \`mapFunction\` body for a new or partially-mapped \`.ts\` file. It runs a specialized Haiku sub-agent that already understands the dmUtils API and the TS2556 dynamic-array pitfall. Prefer this over \`file_edit\` for non-trivial mapping work. + +**When to edit the \`.ts\` file directly with \`file_edit\` instead of calling \`generate_data_mapping\`:** +- Targeted single-field tweaks (rename, change a default value, fix one mapping). +- Adding a calculated field where the formula is dictated by the user verbatim. +- Fixing a TS2556 spread error reported by the user (replace \`dmUtils.sum(...arr)\` with \`arr.reduce(...)\`). 
+ +For broader mapping changes (mapping new schema fields, restructuring nested mappings, large-scale rewrites), call \`generate_data_mapping\` — it's faster and avoids the dmUtils pitfalls catalogued above.`, + +}; + +// Build full reference by joining all sections +export const DATA_MAPPER_REFERENCE_FULL = `# WSO2 MI Data Mapper Reference + +${Object.values(DATA_MAPPER_REFERENCE_SECTIONS).join('\n\n')}`; diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_artifact_reference.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_artifact_reference.ts index 357a84a7d2a..eed0229e34b 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_artifact_reference.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_artifact_reference.ts @@ -48,6 +48,13 @@ Children: one or more \`\` elements + optional \`\`. | \`protocol\` | \`http\` \\| \`https\` | | \`inSequence\`, \`outSequence\`, \`faultSequence\` | Named-sequence references; OR use inline \`\`, \`\`, \`\` child elements | +### Critical Rule — every \`\` must declare \`uri-template\` or \`url-mapping\` +A bare \`\` is invalid; the validator rejects it. Every resource must include exactly one routing attribute (mutually exclusive): +\`\`\`xml + + +\`\`\` + ### Working example \`\`\`xml \` does NOT support \`scope\` +The \`\` mediator has no \`scope\` attribute — it writes to Synapse variables only. For axis2/transport/synapse-scope properties (\`HTTP_SC\`, \`messageType\`, \`ContentType\`, \`OUT_ONLY\`, \`REST_URL_POSTFIX\`, etc.), use the \`\` mediator instead. The validator rejects \`scope\` on \`\`. +\`\`\`xml + + + + + +\`\`\` + ### Setting Outbound HTTP Request Headers The \`\` mediator **cannot** set outbound HTTP headers — it writes to Synapse variables only. 
Two working options: diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_registry_resource_guide.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_registry_resource_guide.ts index 576dedb73d7..c77038346c5 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_registry_resource_guide.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse-core/synapse_registry_resource_guide.ts @@ -331,7 +331,18 @@ common_patterns: `## Common Registry Resource Patterns -\`\`\``, +\`\`\` + +### Common Mistake: Local Entries Are NOT Registry Resources +\`\${registry("conf:/KEY")}\` and \`\${registry("gov:/...")}\` only resolve files registered as \`type="registry/resource"\` in artifact.xml. A \`\` lives in the Synapse config (deployed via the .car) — it is **not** a registry resource, and \`\${registry(...)}\` will not see it. + +To read a local entry, use the legacy \`get-property('local-entry', ...)\` XPath inside a \`\` mediator: +\`\`\`xml + + + +\`\`\` +Read the bound property in scripts/expressions as \`mc.getProperty('myVar')\` or \`\${props.synapse.myVar}\`. Do NOT use \`\${registry(...)}\` for local entries.`, secure_vault: `## Secure Vault — Secret Resolution diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse_guide.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse_guide.ts index e191f7395c4..c347135af6b 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse_guide.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/context/synapse_guide.ts @@ -16,7 +16,7 @@ * under the License. 
*/ -import { CREATE_DATA_MAPPER_TOOL_NAME } from "../tools/types"; +import { CREATE_DATA_MAPPER_TOOL_NAME, GENERATE_DATA_MAPPING_TOOL_NAME } from "../tools/types"; import { SYNAPSE_EXPRESSION_GUIDE } from "./synapse_expression_guide" export const SYNAPSE_GUIDE = ` @@ -396,41 +396,13 @@ For the full property reference (70+ properties with exact names, scopes, and us **Important runtime requirement:** Data mapper artifacts and the \`\` mediator require MI runtime \`4.4.0\` or newer. If runtime is below \`4.4.0\`, do not use data mapper generation. Data mappers transform data between input and output schemas using TypeScript. They are used with the \`\` mediator in Synapse integrations. -Always use ${CREATE_DATA_MAPPER_TOOL_NAME} tool to create a data mapper. Do not create data mappers manually. -**Folder Structure:** -Each data mapper creates a folder at \`src/main/wso2mi/resources/datamapper/{name}/\` containing: -- \`{name}.ts\` - TypeScript mapping file with input/output interfaces and mapFunction -- \`dm-utils.ts\` - Utility operators (arithmetic, string, type conversion functions) +**Tool routing (always prefer tools over hand-writing):** +- New mapper → use \`${CREATE_DATA_MAPPER_TOOL_NAME}\` (scaffolds folder, \`.ts\` file, and \`dm-utils.ts\`). +- Generate / fill the \`mapFunction\` body → use \`${GENERATE_DATA_MAPPING_TOOL_NAME}\`. +- Direct \`file_edit\` on the \`.ts\` file is only for targeted single-field tweaks, user-dictated formula changes, or fixing a TS2556 spread error. 
-**TypeScript Mapping File Format:** -\`\`\`typescript -import * as dmUtils from "./dm-utils"; -declare var DM_PROPERTIES: any; - -/** - * inputType:JSON - * title:"InputSchemaName" - */ -interface InputRoot { - // Input schema fields -} - -/** - * outputType:JSON - * title:"OutputSchemaName" - */ -interface OutputRoot { - // Output schema fields -} - -export function mapFunction(input: InputRoot): OutputRoot { - return { - // Field mappings: outputField: input.inputField - // Can use dmUtils functions for transformations - }; -} -\`\`\` +**Before editing an existing \`.ts\` mapping file**, load \`data-mapper-reference\` via \`load_context_reference\` for the dmUtils API, the TS2556 dynamic-array spread rule (use \`arr.reduce(...)\`, never \`dmUtils.sum(...arr)\`), and the file format. Sections: \`overview\`, \`typescript_rules\`, \`dmutils_functions\`, \`dynamic_arrays\`, \`when_to_use_dmutils\`, \`array_handling\`, \`tool_usage\`. **Using Data Mapper in Synapse XML:** \`\`\`xml @@ -442,12 +414,6 @@ export function mapFunction(input: InputRoot): OutputRoot { outputType="JSON"/> \`\`\` -**Available dm-utils Functions:** -- Arithmetic: \`dmUtils.sum()\`, \`dmUtils.max()\`, \`dmUtils.min()\`, \`dmUtils.average()\`, \`dmUtils.ceiling()\`, \`dmUtils.floor()\`, \`dmUtils.round()\` -- String: \`dmUtils.concat()\`, \`dmUtils.split()\`, \`dmUtils.toUppercase()\`, \`dmUtils.toLowercase()\`, \`dmUtils.trim()\`, \`dmUtils.substring()\`, \`dmUtils.stringLength()\`, \`dmUtils.startsWith()\`, \`dmUtils.endsWith()\`, \`dmUtils.replaceFirst()\`, \`dmUtils.match()\` -- Type conversion: \`dmUtils.toNumber()\`, \`dmUtils.toBoolean()\`, \`dmUtils.numberToString()\`, \`dmUtils.booleanToString()\` -- Property access: \`dmUtils.getPropertyValue(scope, name)\` - ## Registry Resources When creating supportive resources that are needed for the Integration inside src/main/wso2mi/resources, an entry should be added to the src/main/wso2mi/resources/artifact.xml. 
If an artifact.xml doesn't exist, then create one and add the entry. The format should be as follows: For data mappers this is automatically done by the ${CREATE_DATA_MAPPER_TOOL_NAME} tool. But for other resources, you need to add the entry manually. diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/stream_guard.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/stream_guard.ts index d00504f1de9..4641deaac56 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/stream_guard.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/stream_guard.ts @@ -110,6 +110,61 @@ function getErrorName(error: unknown): string | undefined { } export function getErrorDiagnostics(error: unknown): string { + const extractApiCallFields = (err: unknown): Record => { + if (!err || typeof err !== 'object') { + return {}; + } + const r = err as Record; + const fields: Record = {}; + // Vercel AI SDK APICallError surface — most useful for provider 4xx debugging. + if (r.statusCode !== undefined) fields.statusCode = r.statusCode; + if (r.url !== undefined) fields.url = r.url; + if (typeof r.responseBody === 'string') { + fields.responseBody = r.responseBody.length > 2000 + ? r.responseBody.slice(0, 2000) + '…[truncated]' + : r.responseBody; + } + if (r.data !== undefined) { + try { + const dataStr = typeof r.data === 'string' ? r.data : JSON.stringify(r.data); + fields.data = dataStr.length > 2000 + ? dataStr.slice(0, 2000) + '…[truncated]' + : dataStr; + } catch { + fields.data = '[unserializable]'; + } + } + if (r.responseHeaders !== undefined && r.responseHeaders !== null && typeof r.responseHeaders === 'object') { + // Whitelist known-safe headers — set-cookie, authorization echoes, + // x-amz-security-token, etc. must never reach logs. 
+ const safeKeys = new Set([ + 'content-type', + 'content-length', + 'date', + 'x-request-id', + 'request-id', + 'retry-after', + 'x-ratelimit-limit', + 'x-ratelimit-remaining', + 'x-ratelimit-reset', + 'anthropic-ratelimit-requests-limit', + 'anthropic-ratelimit-requests-remaining', + 'anthropic-ratelimit-requests-reset', + 'anthropic-ratelimit-tokens-limit', + 'anthropic-ratelimit-tokens-remaining', + 'anthropic-ratelimit-tokens-reset', + ]); + const filtered: Record = {}; + for (const [key, value] of Object.entries(r.responseHeaders as Record)) { + if (safeKeys.has(key.toLowerCase())) { + filtered[key] = value; + } + } + fields.responseHeaders = filtered; + } + return fields; + }; + if (error instanceof Error) { const topOfStack = typeof error.stack === 'string' ? error.stack.split('\n').slice(0, 3).join(' | ') @@ -120,6 +175,8 @@ export function getErrorDiagnostics(error: unknown): string { code: getErrorCode(error), message: error.message, cause: cause ? getErrorMessage(cause) : undefined, + ...extractApiCallFields(error), + causeDiagnostics: cause ? extractApiCallFields(cause) : undefined, stack: topOfStack, }); } @@ -131,6 +188,7 @@ export function getErrorDiagnostics(error: unknown): string { code: getErrorCode(error), message: typeof record.message === 'string' ? record.message : undefined, type: typeof record.type === 'string' ? 
record.type : undefined, + ...extractApiCallFields(error), }); } diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/bash_tools.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/bash_tools.ts index 8979eaef3c3..e234bd74fa1 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/bash_tools.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/bash_tools.ts @@ -35,6 +35,7 @@ import { normalizePrefixRule, } from './shell_sandbox'; import { AgentUndoCheckpointManager } from '../undo/checkpoint-manager'; +import { stripAnsiAndControl } from '../../utils/sanitize-text'; import treeKill = require('tree-kill'); // ============================================================================ @@ -327,7 +328,19 @@ function appendBoundedOutput( return { output: current, truncated: alreadyTruncated }; } - const combined = current + chunk; + // Strip ANSI escapes and stray control bytes per chunk before accumulation. + // Maven/Gradle emit ANSI color codes; raw 0x00-0x1F bytes in tool-result + // strings cause the Copilot proxy to reject the request with + // `unexpected control character in string`. Per-chunk stripping is safe + // because the regex only matches complete ESC...terminator sequences; + // mid-sequence splits across chunks degrade gracefully (the stripped + // remnant is harmless text). 
+ const sanitized = stripAnsiAndControl(chunk); + if (!sanitized) { + return { output: current, truncated: alreadyTruncated }; + } + + const combined = current + sanitized; if (combined.length <= MAX_SHELL_OUTPUT_CHARS) { return { output: combined, truncated: alreadyTruncated }; } diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/context_tools.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/context_tools.ts index 5f4a95c57b4..c20125881e6 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/context_tools.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/context_tools.ts @@ -79,6 +79,10 @@ import { UNIT_TEST_REFERENCE_FULL, UNIT_TEST_REFERENCE_SECTIONS, } from '../context/unit-tests/unit_test_reference'; +import { + DATA_MAPPER_REFERENCE_FULL, + DATA_MAPPER_REFERENCE_SECTIONS, +} from '../context/data_mapper_reference'; import { logDebug, logWarn } from '../../copilot/logger'; import { ContextExecuteFn, ToolResult } from './types'; import { getRuntimeVersionFromPom } from './connector_store_cache'; @@ -197,6 +201,14 @@ const CONTEXT_REFERENCES: ContextDefinition[] = [ sections: UNIT_TEST_REFERENCE_SECTIONS, aliases: ['unit_test_reference', 'unit-test-guide'], }, + { + name: 'data-mapper-reference', + description: 'TypeScript data mapper reference: .ts file skeleton, dmUtils helper API (sum/average/max/min/concat/toNumber/etc with signatures), the TS2556 dynamic-array spread pitfall (use array.reduce(...), never dmUtils.sum(...arr)), array handling patterns, and tool-routing guidance (prefer create_data_mapper / generate_data_mapping over hand-written mappings). Load before editing existing .ts mapping files. 
Requires MI runtime 4.4.0+.', + content: DATA_MAPPER_REFERENCE_FULL, + sections: DATA_MAPPER_REFERENCE_SECTIONS, + minRuntimeVersion: RUNTIME_VERSION_440, + aliases: ['data_mapper_reference', 'datamapper-reference', 'dmutils-reference'], + }, ]; function normalizeContextName(value: string): string { @@ -366,7 +378,7 @@ export function createContextTool(execute: ContextExecuteFn) { description: `Loads deep reference context on demand to avoid prompt bloat. Use context_name in the form "topic" or "topic:section". Example: "synapse-expression-spec:type_coercion". - Note: AI connector context requires MI runtime 4.4.0 or newer.`, + Note: Some contexts are runtime-gated and require the MI runtime version specified by their minRuntimeVersion (e.g., MI runtime ${RUNTIME_VERSION_440} or newer).`, inputSchema: contextInputSchema, execute, }); diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/file_tools.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/file_tools.ts index d5d250bef5b..7bc7c063ddf 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/file_tools.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/file_tools.ts @@ -53,6 +53,7 @@ import { RG_EXCLUDED_SENSITIVE_GLOBS, } from './ripgrep_runner'; import { isSensitiveTokenName } from './shell_sandbox'; +import { stripAnsiAndControl } from '../../utils/sanitize-text'; // ============================================================================ // Validation Functions @@ -878,17 +879,29 @@ export function createReadExecute(projectPath: string, readFiles?: Set): } } - // Read file content - const content = fs.readFileSync(fullPath, 'utf-8'); + // Read file content. Strip ANSI escapes / stray control bytes — common + // in captured Maven/Gradle/npm build logs. The Copilot proxy rejects + // tool-result strings containing raw 0x00-0x1F bytes with + // `unexpected control character in string`. 
+ const rawContent = fs.readFileSync(fullPath, 'utf-8'); + const content = stripAnsiAndControl(rawContent); - // Handle empty file - if (content.trim().length === 0) { + // Handle empty file — distinguish truly empty from "sanitized to empty" + // so the user knows the file actually contained ANSI/control bytes. + if (rawContent.trim().length === 0) { logDebug(`[FileReadTool] File is empty: ${file_path}`); return { success: true, message: `File '${file_path}' is empty.`, }; } + if (content.trim().length === 0) { + logDebug(`[FileReadTool] File contained only ANSI/control characters after sanitization: ${file_path}`); + return { + success: true, + message: `File '${file_path}' contained only ANSI escape sequences or control characters; no readable text after sanitization.`, + }; + } // Split content into lines const lines = content.split('\n'); diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/tool_load.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/tool_load.ts index 99d596f9510..0193200b627 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/tool_load.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/tool_load.ts @@ -38,8 +38,6 @@ export const DEFERRED_TOOL_DESCRIPTIONS: Record = { create_subagent: 'Spawn Explore or SynapseContext subagent for deep exploration', kill_task: 'Terminate a background shell or subagent task', task_output: 'Get output from a background task', - web_search: 'Search the web for external information', - web_fetch: 'Fetch and analyze content from a specific URL', read_server_logs: 'Read and analyze MI server log files (errors, deployments, HTTP requests)', }; diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/types.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/types.ts index 4e2152511e0..c1be3623e40 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/types.ts +++ 
b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/types.ts @@ -161,8 +161,6 @@ export const DEFERRED_TOOLS = new Set([ SUBAGENT_TOOL_NAME, KILL_TASK_TOOL_NAME, TASK_OUTPUT_TOOL_NAME, - WEB_SEARCH_TOOL_NAME, - WEB_FETCH_TOOL_NAME, READ_SERVER_LOGS_TOOL_NAME, ]); diff --git a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/web_tools.ts b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/web_tools.ts index a854fdf7c2f..4b85518d851 100644 --- a/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/web_tools.ts +++ b/workspaces/mi/mi-extension/src/ai-features/agent-mode/tools/web_tools.ts @@ -16,24 +16,23 @@ * under the License. */ -import { tool, generateText } from 'ai'; +import { tool } from 'ai'; import { z } from 'zod'; -import { v4 as uuidv4 } from 'uuid'; -import { AgentEvent } from '@wso2/mi-core'; +import type { AnthropicProvider } from '@ai-sdk/anthropic'; +import { tavily as createTavilyClient } from '@tavily/core'; import { logError, logInfo } from '../../copilot/logger'; -import { ANTHROPIC_SONNET_4_6, AnthropicModel, getAnthropicProvider, getAnthropicClientForCustomModel } from '../../connection'; -import { PendingPlanApproval } from './plan_mode_tools'; import { ToolResult, - WEB_FETCH_TOOL_NAME, - WEB_SEARCH_TOOL_NAME, WebFetchExecuteFn, WebSearchExecuteFn, } from './types'; -type AgentEventHandler = (event: AgentEvent) => void; +/** + * Which `web_search` / `web_fetch` implementation to register on the agent. + * Resolved once per turn in `executeAgent` from the active login method. + */ +export type WebToolsProvider = 'anthropic-server' | 'tavily-local' | 'none'; -type WebApprovalKind = 'web_search' | 'web_fetch'; const MI_DOCS_DOMAIN = 'mi.docs.wso2.com'; function sanitizeDomainList(domains?: string[]): string[] | undefined { @@ -52,206 +51,179 @@ function sanitizeDomainList(domains?: string[]): string[] | undefined { return sanitized.length > 0 ? 
sanitized : undefined; } -function extractToolOutput(result: any): unknown { +/** + * Match a hostname against a single allow/block domain entry. Treats `domain` + * as covering itself and all subdomains, so `github.com` matches both + * `github.com` and `api.github.com`. + */ +function hostnameMatchesDomain(hostname: string, domain: string): boolean { + const h = hostname.toLowerCase(); + const d = domain.trim().toLowerCase().replace(/^\./, ''); + return h === d || h.endsWith(`.${d}`); +} + +/** + * Enforce `allowed_domains` / `blocked_domains` against a single URL. Tavily + * Extract takes one URL (not a search-style filter), so the lists must be + * applied client-side or they'd be a no-op — exposing them in the schema + * without enforcement gives the model a false sense of safety. + */ +function checkUrlAgainstDomainLists( + urlString: string, + allowedDomains?: string[], + blockedDomains?: string[], +): { ok: true } | { ok: false; reason: string } { + let hostname: string; try { - const stepWithToolResults = (result?.steps || []).find((step: any) => Array.isArray(step?.toolResults) && step.toolResults.length > 0); - if (stepWithToolResults?.toolResults?.[0]) { - return stepWithToolResults.toolResults[0].output; - } + hostname = new URL(urlString).hostname.toLowerCase(); } catch { - // Ignore extraction issues and fall back to text output. + // Schema validates URL shape; on parse failure here, defer to Tavily. 
+ return { ok: true }; } - return undefined; -} - -function getProviderToolFactory(provider: any, candidateNames: string[]): ((args: any) => any) | null { - for (const candidateName of candidateNames) { - const factory = provider?.tools?.[candidateName]; - if (typeof factory === 'function') { - return factory; - } + const allowed = sanitizeDomainList(allowedDomains); + if (allowed && !allowed.some((d) => hostnameMatchesDomain(hostname, d))) { + return { + ok: false, + reason: `URL hostname "${hostname}" is not in allowed_domains [${allowed.join(', ')}].`, + }; } - return null; -} - -async function requestWebApproval( - eventHandler: AgentEventHandler, - pendingApprovals: Map, - request: { - sessionId: string; - kind: WebApprovalKind; - approvalTitle: string; - content: string; - }, - mainAbortSignal?: AbortSignal -): Promise { - const approvalId = uuidv4(); - eventHandler({ - type: 'plan_approval_requested', - approvalId, - approvalKind: request.kind, - approvalTitle: request.approvalTitle, - approveLabel: 'Allow', - rejectLabel: 'Deny', - allowFeedback: false, - content: request.content, - } as any); + const blocked = sanitizeDomainList(blockedDomains); + if (blocked && blocked.some((d) => hostnameMatchesDomain(hostname, d))) { + return { + ok: false, + reason: `URL hostname "${hostname}" is in blocked_domains [${blocked.join(', ')}].`, + }; + } - let settled = false; - const cleanup = (): void => { - if (settled) return; - settled = true; - pendingApprovals.delete(approvalId); - if (abortHandler && mainAbortSignal) { - mainAbortSignal.removeEventListener('abort', abortHandler); - } - }; + return { ok: true }; +} - let abortHandler: (() => void) | undefined; +// ============================================================================ +// Tavily-backed implementations (AWS Bedrock branch only) +// ============================================================================ - const approval = await new Promise<{ approved: boolean; feedback?: string }>((resolve, 
reject) => { - pendingApprovals.set(approvalId, { - approvalId, - approvalKind: request.kind, - sessionId: request.sessionId, - resolve: (result) => { - cleanup(); - resolve(result); - }, - reject: (error: Error) => { - cleanup(); - reject(error); - } +/** + * Format a Tavily search response as a concise markdown summary suitable for + * the agent. Mirrors the `{success, message}` shape used by the rest of our + * local tools so chat-history persistence and UI rendering don't need to branch. + * + * Uses `@tavily/core` directly (the AI SDK wrapper `@tavily/ai-sdk` is ESM-only + * and `"type": "module"` with `import`-only `exports`, so it can't be required + * by our CJS webpack bundle — see commit history for the previous attempt). + */ +async function runTavilySearch( + apiKey: string, + params: { query: string; includeDomains?: string[]; excludeDomains?: string[] } +): Promise { + try { + const client = createTavilyClient({ apiKey }); + const response = await client.search(params.query, { + includeAnswer: true, + maxResults: 5, + ...(params.includeDomains ? { includeDomains: params.includeDomains } : {}), + ...(params.excludeDomains ? { excludeDomains: params.excludeDomains } : {}), }); - if (mainAbortSignal) { - if (mainAbortSignal.aborted) { - cleanup(); - resolve({ approved: false }); - return; + const lines: string[] = []; + if (typeof response?.answer === 'string' && response.answer.trim()) { + lines.push(`Answer: ${response.answer.trim()}`); + } + const results = Array.isArray(response?.results) ? response.results : []; + if (results.length > 0) { + lines.push('', 'Results:'); + for (const r of results) { + const title = r?.title || r?.url || 'Untitled'; + const url = r?.url || ''; + const snippet = (r?.content || '').toString().trim(); + lines.push(`- ${title}${url ? ` (${url})` : ''}${snippet ? 
`\n ${snippet}` : ''}`); } - abortHandler = () => { - cleanup(); - resolve({ approved: false }); - }; - mainAbortSignal.addEventListener('abort', abortHandler, { once: true }); } - }); - return approval.approved; + const message = lines.length > 0 + ? lines.join('\n') + : 'Tavily search returned no results.'; + return { success: true, message }; + } catch (error: any) { + logError('[WebSearchTool] Tavily search failed', error); + return { + success: false, + message: `Web search failed: ${error?.message || String(error)}`, + error: 'WEB_SEARCH_FAILED', + }; + } } -/** - * Creates execute function for web_search tool. - * Requires explicit user consent before any outbound web search. - */ -export function createWebSearchExecute( - getAnthropicClient: (model: AnthropicModel) => Promise, - eventHandler: AgentEventHandler, - pendingApprovals: Map, - webAccessPreapproved: boolean, - sessionId: string, - mainModelId?: string, - mainModelIsCustom?: boolean, - mainAbortSignal?: AbortSignal -): WebSearchExecuteFn { - return async (args): Promise => { - const allowedDomains = sanitizeDomainList(args.allowed_domains); - const blockedDomains = sanitizeDomainList(args.blocked_domains); +// Match the Anthropic webFetch maxContentTokens=32000 cap (~4 chars/token). +const TAVILY_EXTRACT_MAX_CHARS = 128_000; - let approved = true; - if (!webAccessPreapproved) { - approved = await requestWebApproval(eventHandler, pendingApprovals, { - sessionId, - kind: 'web_search', - approvalTitle: 'Allow Web Search?', - content: `Agent wants to search the web for: "${args.query}"`, - }, mainAbortSignal); - } +async function runTavilyExtract(apiKey: string, url: string, taskPrompt?: string): Promise { + try { + const client = createTavilyClient({ apiKey }); + const response = await client.extract([url], { + extractDepth: 'advanced', + format: 'markdown', + }); - if (!approved) { + const failed = Array.isArray(response?.failedResults) ? 
response.failedResults : []; + if (failed.length > 0) { + const detail = failed.map((f) => `${f?.url}: ${f?.error}`).join('; '); return { success: false, - message: 'User denied permission to perform web search.', - error: 'WEB_SEARCH_DENIED', + message: `Tavily extract failed: ${detail}`, + error: 'WEB_FETCH_FAILED', }; } - try { - logInfo(`[WebSearchTool] Running query: ${args.query}`); - const anthropicProvider = await getAnthropicProvider(); - const searchFactory = getProviderToolFactory(anthropicProvider as any, ['webSearch_20250305']); - - if (!searchFactory) { - throw new Error('Anthropic web search tool is unavailable in this environment.'); - } - - const webSearch = searchFactory({ - maxUses: 5, - ...(allowedDomains ? { allowedDomains } : {}), - ...(blockedDomains ? { blockedDomains } : {}), - }); - - const result = await generateText({ - model: mainModelIsCustom && mainModelId - ? await getAnthropicClientForCustomModel(mainModelId) - : await getAnthropicClient((mainModelId || ANTHROPIC_SONNET_4_6) as AnthropicModel), - prompt: [ - `Search query: ${args.query}`, - 'Use the web_search tool and return concise findings with relevant source links.' - ].join('\n'), - tools: { - web_search: webSearch, - }, - abortSignal: mainAbortSignal, - }); - - const toolOutput = extractToolOutput(result); - const message = typeof toolOutput === 'string' - ? toolOutput - : result.text || (toolOutput ? JSON.stringify(toolOutput, null, 2) : 'Web search completed successfully.'); - - return { - success: true, - message, - }; - } catch (error: any) { - logError('[WebSearchTool] Web search failed', error); - const errorMessage = error?.message || String(error); - - if (errorMessage.includes('responses API is unavailable')) { - return { - success: false, - message: 'Web search failed: Anthropic responses API is unavailable in this environment. 
Upgrade @ai-sdk/anthropic to use web_search and web_fetch tools.', - error: 'WEB_SEARCH_API_UNAVAILABLE', - }; - } - + const results = Array.isArray(response?.results) ? response.results : []; + const first = results[0]; + if (!first?.rawContent) { return { success: false, - message: `Web search failed: ${errorMessage}`, - error: 'WEB_SEARCH_FAILED', + message: `Tavily extract returned no content for ${url}.`, + error: 'WEB_FETCH_EMPTY', }; } + + const header = taskPrompt ? `Task: ${taskPrompt}\nURL: ${url}\n\n` : `URL: ${url}\n\n`; + const rawContent: string = first.rawContent; + const content = rawContent.length > TAVILY_EXTRACT_MAX_CHARS + ? rawContent.slice(0, TAVILY_EXTRACT_MAX_CHARS) + '\n\n[CONTENT TRUNCATED]' + : rawContent; + return { + success: true, + message: `${header}${content}`, + }; + } catch (error: any) { + logError('[WebFetchTool] Tavily extract failed', error); + return { + success: false, + message: `Web fetch failed: ${error?.message || String(error)}`, + error: 'WEB_FETCH_FAILED', + }; + } +} + +/** + * Tavily-backed `web_search` execute. Used only on the AWS Bedrock branch. + */ +export function createWebSearchExecute(tavilyKey: string): WebSearchExecuteFn { + return async (args): Promise => { + logInfo(`[WebSearchTool] Tavily search: ${args.query}`); + return await runTavilySearch(tavilyKey, { + query: args.query, + includeDomains: sanitizeDomainList(args.allowed_domains), + excludeDomains: sanitizeDomainList(args.blocked_domains), + }); }; } /** - * Creates execute function for web_fetch tool. - * Requires explicit user consent before fetching remote content. + * Tavily-backed `web_fetch` execute. Used only on the AWS Bedrock branch. + * Hard-fails on `mi.docs.wso2.com` because Tavily Extract can't render JS. 
*/ -export function createWebFetchExecute( - getAnthropicClient: (model: AnthropicModel) => Promise, - eventHandler: AgentEventHandler, - pendingApprovals: Map, - webAccessPreapproved: boolean, - sessionId: string, - mainModelId?: string, - mainModelIsCustom?: boolean, - mainAbortSignal?: AbortSignal -): WebFetchExecuteFn { +export function createWebFetchExecute(tavilyKey: string): WebFetchExecuteFn { return async (args): Promise => { try { const hostname = new URL(args.url).hostname.toLowerCase(); @@ -263,99 +235,36 @@ export function createWebFetchExecute( }; } } catch { - // URL validity is already enforced by the tool input schema. - } - - const allowedDomains = sanitizeDomainList(args.allowed_domains); - const blockedDomains = sanitizeDomainList(args.blocked_domains); - - let approved = true; - if (!webAccessPreapproved) { - approved = await requestWebApproval(eventHandler, pendingApprovals, { - sessionId, - kind: 'web_fetch', - approvalTitle: 'Allow Web Fetch?', - content: `Agent wants to fetch content from: ${args.url}`, - }, mainAbortSignal); + // URL validity is enforced by the input schema; ignore parse failures here. } - if (!approved) { + const domainCheck = checkUrlAgainstDomainLists(args.url, args.allowed_domains, args.blocked_domains); + if (!domainCheck.ok) { return { success: false, - message: 'User denied permission to fetch web content.', - error: 'WEB_FETCH_DENIED', + message: `Web fetch refused: ${domainCheck.reason}`, + error: 'WEB_FETCH_DOMAIN_BLOCKED', }; } - try { - logInfo(`[WebFetchTool] Fetching URL: ${args.url}`); - const anthropicProvider = await getAnthropicProvider(); - const fetchFactory = getProviderToolFactory(anthropicProvider as any, ['webFetch_20250910', 'webFetch_20250305']); - - if (!fetchFactory) { - throw new Error('Anthropic web fetch tool is unavailable in this environment.'); - } - - const webFetch = fetchFactory({ - maxUses: 3, - ...(allowedDomains ? { allowedDomains } : {}), - ...(blockedDomains ? 
{ blockedDomains } : {}), - }); - - const result = await generateText({ - model: mainModelIsCustom && mainModelId - ? await getAnthropicClientForCustomModel(mainModelId) - : await getAnthropicClient((mainModelId || ANTHROPIC_SONNET_4_6) as AnthropicModel), - prompt: [ - `URL: ${args.url}`, - `Task: ${args.prompt}`, - 'Use the web_fetch tool to retrieve and analyze this page.' - ].join('\n'), - tools: { - web_fetch: webFetch, - }, - abortSignal: mainAbortSignal, - }); - - const toolOutput = extractToolOutput(result); - const message = typeof toolOutput === 'string' - ? toolOutput - : result.text || (toolOutput ? JSON.stringify(toolOutput, null, 2) : 'Web fetch completed successfully.'); - - return { - success: true, - message, - }; - } catch (error: any) { - logError('[WebFetchTool] Web fetch failed', error); - const errorMessage = error?.message || String(error); - - if (errorMessage.includes('responses API is unavailable')) { - return { - success: false, - message: 'Web fetch failed: Anthropic responses API is unavailable in this environment. 
Upgrade @ai-sdk/anthropic to use web_search and web_fetch tools.', - error: 'WEB_FETCH_API_UNAVAILABLE', - }; - } - - return { - success: false, - message: `Web fetch failed: ${errorMessage}`, - error: 'WEB_FETCH_FAILED', - }; - } + logInfo(`[WebFetchTool] Tavily extract: ${args.url}`); + return await runTavilyExtract(tavilyKey, args.url, args.prompt); }; } const webSearchSchema = z.object({ - query: z.string().min(2).describe('The web search query to run.'), + query: z.string().min(2).describe('The web search query to run, written as natural language.'), allowed_domains: z.array(z.string()).optional().describe('Optional allow-list of domains to include in search results (for MI docs, use ["mi.docs.wso2.com"]).'), blocked_domains: z.array(z.string()).optional().describe('Optional block-list of domains to exclude from search results.'), }); export function createWebSearchTool(execute: WebSearchExecuteFn) { return (tool as any)({ - description: 'Search the web for up-to-date information when local project context is insufficient. Supports domain allow/block filters. For MI docs, use allowed_domains=["mi.docs.wso2.com"]. Requires user consent before execution; if denied, continue without web access.', + description: + 'Search the web via Tavily. Phrase the query as a natural-language question or sentence — ' + + 'Tavily ranks better on conversational queries than on keyword strings ' + + '(e.g. "How do I configure a WSO2 MI HTTP inbound endpoint?" not "WSO2 MI HTTP inbound endpoint config"). ' + + 'Supports allowed_domains / blocked_domains. 
For MI docs, set allowed_domains=["mi.docs.wso2.com"].', inputSchema: webSearchSchema, execute, }); @@ -363,15 +272,43 @@ export function createWebSearchTool(execute: WebSearchExecuteFn) { const webFetchSchema = z.object({ url: z.string().url().describe('The URL to fetch and analyze.'), - prompt: z.string().min(3).describe('What to extract or analyze from the fetched page.'), + prompt: z.string().min(3).describe('Natural-language description of what to extract from the page.'), allowed_domains: z.array(z.string()).optional().describe('Optional allow-list of domains that fetch requests can access.'), blocked_domains: z.array(z.string()).optional().describe('Optional block-list of domains that fetch requests must avoid.'), }); export function createWebFetchTool(execute: WebFetchExecuteFn) { return (tool as any)({ - description: 'Fetch and analyze content from a specific URL. Supports domain allow/block filters. web_fetch does not support JavaScript-rendered pages (including MI docs), so use web_search with allowed_domains=["mi.docs.wso2.com"] for MI docs. Requires user consent before execution; if denied, continue without web access.', + description: + 'Fetch and extract content from a URL via Tavily. ' + + 'Write the "prompt" field as a natural-language description of what to extract, not keywords. ' + + 'Does not render JavaScript; mi.docs.wso2.com is JS-rendered, so use web_search with allowed_domains=["mi.docs.wso2.com"] for MI docs.', inputSchema: webFetchSchema, execute, }); } + +// ============================================================================ +// Anthropic server-tool factory (MI_INTEL Proxy + ANTHROPIC_KEY branch) +// ============================================================================ + +/** + * Returns Anthropic's first-party `web_search` and `web_fetch` server tools. 
+ * Register the result directly in the main agent's `streamText` tool map — + * Anthropic executes them inline as part of the model's turn (no local execute, + * no extra LLM round-trip). + * + * Stays on `_20250305` / `_20250910`. The `_20260209` versions only differ when + * the code-execution server tool is also enabled (dynamic filtering depends on + * it); we don't ship code-execution today. + */ +export function createAnthropicServerWebTools(provider: AnthropicProvider): Record { + return { + web_search: provider.tools.webSearch_20250305({ maxUses: 5 }), + web_fetch: provider.tools.webFetch_20250910({ + maxUses: 3, + citations: { enabled: true }, + maxContentTokens: 32000, + }), + }; +} diff --git a/workspaces/mi/mi-extension/src/ai-features/aiMachine.ts b/workspaces/mi/mi-extension/src/ai-features/aiMachine.ts index f7d8a67feed..5b2151b38a9 100644 --- a/workspaces/mi/mi-extension/src/ai-features/aiMachine.ts +++ b/workspaces/mi/mi-extension/src/ai-features/aiMachine.ts @@ -32,7 +32,8 @@ import { isDevantUserLoggedIn, getPlatformStsToken, exchangeStsToCopilotToken, - storeAuthCredentials + storeAuthCredentials, + hasExplicitLogoutState } from './auth'; import { PromptObject } from '@wso2/mi-core'; import { logError, logInfo, logWarn } from './copilot/logger'; @@ -53,6 +54,10 @@ const trySilentPlatformBootstrap = async (): Promise => { return; } + if (hasExplicitLogoutState()) { + return; + } + silentPlatformBootstrapInFlight = true; try { const isLoggedIn = await isDevantUserLoggedIn(); @@ -536,8 +541,10 @@ const checkWorkspaceAndToken = async (): Promise<{ workspaceSupported: boolean; tokenData = { token: apiKey, loginMethod: LoginMethod.ANTHROPIC_KEY }; } } else if (credentials?.loginMethod === LoginMethod.AWS_BEDROCK) { - const secrets = credentials.secrets as { accessKeyId?: string; secretAccessKey?: string; region?: string }; - if (secrets.accessKeyId && secrets.secretAccessKey && secrets.region) { + const secrets = credentials.secrets as { authType?: 
string; accessKeyId?: string; secretAccessKey?: string; region?: string; apiKey?: string }; + if (secrets.authType === 'api_key' && secrets.apiKey && secrets.region) { + tokenData = { token: secrets.apiKey, loginMethod: LoginMethod.AWS_BEDROCK }; + } else if (secrets.accessKeyId && secrets.secretAccessKey && secrets.region) { tokenData = { token: secrets.accessKeyId, loginMethod: LoginMethod.AWS_BEDROCK }; } } @@ -594,11 +601,18 @@ const validateApiKeyService = async (_context: AIMachineContext, event: any) => }; const validateAwsCredentialsService = async (_context: AIMachineContext, event: any) => { - const { accessKeyId, secretAccessKey, region, sessionToken } = event.payload || {}; + const { authType, accessKeyId, secretAccessKey, region, sessionToken, apiKey, tavilyApiKey } = event.payload || {}; + if (authType === 'api_key') { + if (!apiKey || !region) { + throw new Error('Amazon Bedrock API key and AWS region are required'); + } + return await validateAwsCredentials({ authType, apiKey, region, tavilyApiKey }); + } + if (!accessKeyId || !secretAccessKey || !region) { throw new Error('AWS access key ID, secret access key, and region are required'); } - return await validateAwsCredentials({ accessKeyId, secretAccessKey, region, sessionToken }); + return await validateAwsCredentials({ authType: 'iam', accessKeyId, secretAccessKey, region, sessionToken, tavilyApiKey }); }; const getTokenAndLoginMethod = async () => { @@ -624,7 +638,14 @@ const getTokenAndLoginMethod = async () => { } if (credentials.loginMethod === LoginMethod.AWS_BEDROCK) { - const secrets = credentials.secrets as { accessKeyId?: string; secretAccessKey?: string; region?: string }; + const secrets = credentials.secrets as { authType?: string; accessKeyId?: string; secretAccessKey?: string; region?: string; apiKey?: string }; + if (secrets.authType === 'api_key') { + if (!secrets.apiKey || !secrets.region) { + throw new Error('Incomplete AWS Bedrock API key credentials. 
Please log in again.'); + } + return { token: secrets.apiKey, loginMethod: LoginMethod.AWS_BEDROCK }; + } + if (!secrets.accessKeyId || !secrets.secretAccessKey || !secrets.region) { throw new Error('Incomplete AWS Bedrock credentials. Please log in again.'); } diff --git a/workspaces/mi/mi-extension/src/ai-features/auth.ts b/workspaces/mi/mi-extension/src/ai-features/auth.ts index 4b3e2997075..19c397c69d8 100644 --- a/workspaces/mi/mi-extension/src/ai-features/auth.ts +++ b/workspaces/mi/mi-extension/src/ai-features/auth.ts @@ -32,7 +32,7 @@ import { AIUserToken, AuthCredentials, LoginMethod, AwsBedrockSecrets } from '@w import { extension } from '../MIExtensionContext'; import * as vscode from 'vscode'; import { createAnthropic } from '@ai-sdk/anthropic'; -import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock'; +import { createBedrockAnthropic } from '@ai-sdk/amazon-bedrock/anthropic'; import { generateText } from 'ai'; import { WICommandIds, IWso2PlatformExtensionAPI } from '@wso2/wso2-platform-core'; import { logInfo, logWarn, logError } from './copilot/logger'; @@ -46,6 +46,7 @@ export const DEFAULT_ANTHROPIC_MODEL = 'claude-haiku-4-5'; // Credential storage key const AUTH_CREDENTIALS_SECRET_KEY = 'MIAuthCredentials'; +const EXPLICIT_LOGOUT_STATE_KEY = 'MIAuthExplicitLogout'; // Legacy keys (for migration) const LEGACY_ACCESS_TOKEN_SECRET_KEY = 'MIAIUser'; @@ -290,12 +291,25 @@ export const isStsTokenUnavailableError = (error: unknown): boolean => { // Credential Storage (Core) // ================================== +export const hasExplicitLogoutState = (): boolean => { + return extension.context.globalState.get(EXPLICIT_LOGOUT_STATE_KEY, false); +}; + +const setExplicitLogoutState = async (): Promise => { + await extension.context.globalState.update(EXPLICIT_LOGOUT_STATE_KEY, true); +}; + +const clearExplicitLogoutState = async (): Promise => { + await extension.context.globalState.update(EXPLICIT_LOGOUT_STATE_KEY, undefined); +}; + /** * Store 
authentication credentials in VSCode secrets. */ export const storeAuthCredentials = async (credentials: AuthCredentials): Promise => { const credentialsJson = JSON.stringify(credentials); await extension.context.secrets.store(AUTH_CREDENTIALS_SECRET_KEY, credentialsJson); + await clearExplicitLogoutState(); }; /** @@ -355,9 +369,10 @@ export const getAccessToken = async (): Promise => { } case LoginMethod.ANTHROPIC_KEY: return credentials.secrets.apiKey; - case LoginMethod.AWS_BEDROCK: - // AWS Bedrock credentials are passed directly to the SDK, not as a single token - return credentials.secrets.accessKeyId; + case LoginMethod.AWS_BEDROCK: { + const secrets = credentials.secrets as AwsBedrockSecrets; + return secrets.authType === 'api_key' ? secrets.apiKey : secrets.accessKeyId; + } } return undefined; @@ -407,7 +422,7 @@ export const cleanupLegacyTokens = async (): Promise => { /** * Check if valid authentication credentials exist. - * If not found but user is already logged in to Devant, bootstrap credentials via STS exchange. + * If not found but user is already logged in to Devant, bootstrap credentials via STS exchange unless the user explicitly logged out. */ export const checkToken = async (): Promise<{ token: string; loginMethod: LoginMethod } | undefined> => { await cleanupLegacyTokens(); @@ -433,6 +448,10 @@ export const checkToken = async (): Promise<{ token: string; loginMethod: LoginM return { token, loginMethod }; } + if (hasExplicitLogoutState()) { + return undefined; + } + if (!isIntegratorExtensionAvailable()) { return undefined; } @@ -553,83 +572,141 @@ export const validateApiKey = async (apiKey: string, loginMethod: LoginMethod): } }; -/** - * Validate AWS Bedrock credentials by making a minimal test API call. 
- */ -export const validateAwsCredentials = async (credentials: { - accessKeyId: string; - secretAccessKey: string; - region: string; +interface AwsBedrockValidationInput { + authType?: 'iam' | 'api_key'; + accessKeyId?: string; + secretAccessKey?: string; + region?: string; sessionToken?: string; -}): Promise => { - const { accessKeyId, secretAccessKey, region, sessionToken } = credentials; + apiKey?: string; + /** Optional Tavily key bundled with Bedrock credentials so users can opt into web tools. */ + tavilyApiKey?: string; +} - if (!accessKeyId || !secretAccessKey || !region) { - throw new Error('AWS access key ID, secret access key, and region are required.'); +const validateBedrockRegion = (region: string): void => { + if (!region) { + throw new Error('AWS region is required.'); } - if (!accessKeyId.startsWith('AKIA') && !accessKeyId.startsWith('ASIA')) { - throw new Error('Please enter a valid AWS access key ID.'); + if (!/^[a-z]{2}(?:-gov)?-[a-z]+-\d+$/.test(region)) { + throw new Error('Invalid AWS region. Please enter a region like us-east-1 or us-west-2.'); } - if (secretAccessKey.length < 20) { - throw new Error('Please enter a valid AWS secret access key.'); + // The `global.` Bedrock inference profile (used by getBedrockValidationModelId + // / getBedrockRegionalPrefix in connection.ts) is only published in the + // commercial AWS partition. GovCloud (`us-gov-*`) and China (`cn-*`) regions + // would silently fail at runtime with "model not found", so reject up front. + if (region.startsWith('us-gov-') || region.startsWith('cn-')) { + throw new Error( + `AWS region "${region}" is not supported. The Anthropic models on Bedrock ` + + `(Haiku 4.5, Sonnet 4.6, Opus 4.7) are only available via the global. ` + + `inference profile in the commercial AWS partition — GovCloud and China ` + + `partitions are not supported. 
Use a commercial region like us-east-1 or eu-west-1.` + ); } +}; - // List of valid AWS regions - const validRegions = [ - 'us-east-1', 'us-west-2', 'us-west-1', 'eu-west-1', 'eu-central-1', - 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'ap-northeast-2', - 'ap-south-1', 'ca-central-1', 'sa-east-1', 'eu-west-2', 'eu-west-3', - 'eu-north-1', 'ap-east-1', 'me-south-1', 'af-south-1', 'ap-southeast-3' - ]; +const getBedrockValidationModelId = async (region: string): Promise => { + const { getBedrockValidationModelId: resolveValidationModelId } = await import('./connection'); + return resolveValidationModelId(region); +}; - if (!validRegions.includes(region)) { - throw new Error('Invalid AWS region. Please select a valid region like us-east-1, us-west-2, etc.'); - } +/** + * Validate AWS Bedrock credentials by making a minimal test API call. + */ +export const validateAwsCredentials = async (credentials: AwsBedrockValidationInput): Promise => { + const authType = credentials.authType === 'api_key' ? 'api_key' : 'iam'; + const region = credentials.region?.trim() ?? ''; + const tavilyApiKey = credentials.tavilyApiKey?.trim() || undefined; + + validateBedrockRegion(region); try { - logInfo('Validating AWS Bedrock credentials...'); + logInfo(`Validating AWS Bedrock ${authType === 'api_key' ? 'API key' : 'IAM credentials'}...`); - const bedrock = createAmazonBedrock({ - region: region, - accessKeyId: accessKeyId, - secretAccessKey: secretAccessKey, - sessionToken: sessionToken, - }); + if (authType === 'api_key') { + const apiKey = credentials.apiKey?.trim() ?? 
''; + if (!apiKey) { + throw new Error('Amazon Bedrock API key is required.'); + } + + const bedrock = createBedrockAnthropic({ + region, + apiKey, + }); + const bedrockClient = bedrock(await getBedrockValidationModelId(region)); + + await generateText({ + model: bedrockClient, + maxOutputTokens: 1, + messages: [{ role: 'user', content: 'Hi' }] + }); + + const authCredentials: AuthCredentials = { + loginMethod: LoginMethod.AWS_BEDROCK, + secrets: { + authType: 'api_key', + apiKey, + region, + tavilyApiKey, + } + }; + await storeAuthCredentials(authCredentials); + + logInfo('AWS Bedrock API key validated successfully'); + return { token: apiKey }; + } + + const accessKeyId = credentials.accessKeyId?.trim() ?? ''; + const secretAccessKey = credentials.secretAccessKey?.trim() ?? ''; + const sessionToken = credentials.sessionToken?.trim() || undefined; - // Get regional prefix based on AWS region and construct model ID - const { getBedrockRegionalPrefix } = await import('./connection'); - const regionalPrefix = getBedrockRegionalPrefix(region); - const modelId = `${regionalPrefix}.anthropic.claude-3-5-haiku-20241022-v1:0`; - const bedrockClient = bedrock(modelId); + if (!accessKeyId || !secretAccessKey) { + throw new Error('AWS access key ID and secret access key are required.'); + } + + if (!accessKeyId.startsWith('AKIA') && !accessKeyId.startsWith('ASIA')) { + throw new Error('Please enter a valid AWS access key ID.'); + } + + if (secretAccessKey.length < 20) { + throw new Error('Please enter a valid AWS secret access key.'); + } + + const bedrock = createBedrockAnthropic({ + region, + accessKeyId, + secretAccessKey, + sessionToken, + }); + const bedrockClient = bedrock(await getBedrockValidationModelId(region)); - // Make a minimal test call to validate credentials await generateText({ model: bedrockClient, maxOutputTokens: 1, messages: [{ role: 'user', content: 'Hi' }] }); - logInfo('AWS Bedrock credentials validated successfully'); - - // Store credentials const 
authCredentials: AuthCredentials = { loginMethod: LoginMethod.AWS_BEDROCK, secrets: { + authType: 'iam', accessKeyId, secretAccessKey, region, - sessionToken + sessionToken, + tavilyApiKey, } }; await storeAuthCredentials(authCredentials); + logInfo('AWS Bedrock IAM credentials validated successfully'); return { token: accessKeyId }; } catch (error) { logError('AWS Bedrock credential validation failed', error); - throw new Error('Validation failed. Please check your AWS credentials and ensure you have access to Amazon Bedrock.'); + const detail = error instanceof Error ? error.message : String(error); + throw new Error(`Validation failed. Please check your AWS Bedrock authentication details and model access. (${detail})`); } }; @@ -645,11 +722,45 @@ export const getAwsBedrockCredentials = async (): Promise => { +export const getTavilyApiKey = async (): Promise => { + const secrets = await getAwsBedrockCredentials(); + return secrets?.tavilyApiKey?.trim() || undefined; +}; + +/** + * Update the Tavily API key on the stored Bedrock credentials. + * Pass undefined or empty string to clear it. + * + * Throws if no Bedrock credentials are stored — Tavily is currently a Bedrock-only opt-in. + */ +export const setTavilyApiKey = async (apiKey: string | undefined): Promise => { + const credentials = await getAuthCredentials(); + if (!credentials || credentials.loginMethod !== LoginMethod.AWS_BEDROCK) { + throw new Error('Tavily API key is only configurable when signed in via AWS Bedrock.'); + } + const trimmed = apiKey?.trim() || undefined; + const updated: AuthCredentials = { + loginMethod: LoginMethod.AWS_BEDROCK, + secrets: { + ...credentials.secrets, + tavilyApiKey: trimmed, + } as AwsBedrockSecrets, + }; + await storeAuthCredentials(updated); +}; + +/** + * Logout and clear only MI Copilot authentication credentials. + * The WSO2 platform session is owned by the platform extension and is intentionally left untouched. 
+ */ +export const logout = async (isUserLogout: boolean = true): Promise => { await clearAuthCredentials(); + if (isUserLogout) { + await setExplicitLogoutState(); + } }; // ================================== diff --git a/workspaces/mi/mi-extension/src/ai-features/connection.ts b/workspaces/mi/mi-extension/src/ai-features/connection.ts index a53d1a02e36..ab160509429 100644 --- a/workspaces/mi/mi-extension/src/ai-features/connection.ts +++ b/workspaces/mi/mi-extension/src/ai-features/connection.ts @@ -15,7 +15,7 @@ // under the License. import { createAnthropic } from "@ai-sdk/anthropic"; -import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock"; +import { createBedrockAnthropic } from "@ai-sdk/amazon-bedrock/anthropic"; import * as vscode from "vscode"; import { getAccessToken, @@ -31,44 +31,48 @@ import { logInfo, logDebug, logError } from "./copilot/logger"; export const ANTHROPIC_HAIKU_4_5 = "claude-haiku-4-5"; export const ANTHROPIC_SONNET_4_6 = "claude-sonnet-4-6"; -export const ANTHROPIC_OPUS_4_6 = "claude-opus-4-6"; +export const ANTHROPIC_OPUS_4_7 = "claude-opus-4-7"; // Backward-compatible alias for existing imports. export const ANTHROPIC_SONNET_4_5 = ANTHROPIC_SONNET_4_6; export type AnthropicModel = | typeof ANTHROPIC_HAIKU_4_5 | typeof ANTHROPIC_SONNET_4_6 - | typeof ANTHROPIC_OPUS_4_6; + | typeof ANTHROPIC_OPUS_4_7; -// Bedrock model ID mappings +// Bedrock inference-profile IDs (without the regional prefix). +// Base IDs verified against `aws bedrock list-inference-profiles`. 
const BEDROCK_MODEL_MAP: Record = { - [ANTHROPIC_HAIKU_4_5]: "anthropic.claude-3-5-haiku-20241022-v1:0", - [ANTHROPIC_SONNET_4_6]: "anthropic.claude-sonnet-4-6-20250619-v1:0", - [ANTHROPIC_OPUS_4_6]: "anthropic.claude-opus-4-6-20250619-v1:0", + [ANTHROPIC_HAIKU_4_5]: "anthropic.claude-haiku-4-5-20251001-v1:0", + [ANTHROPIC_SONNET_4_6]: "anthropic.claude-sonnet-4-6", + [ANTHROPIC_OPUS_4_7]: "anthropic.claude-opus-4-7", }; /** - * Get the regional prefix for Bedrock model IDs based on AWS region. - * Cross-region inference requires a regional prefix (e.g., us., eu.). + * Bedrock 4.x models can only be invoked through an inference profile, not as + * on-demand foundation models. Region-pinned profiles (us./eu./ap./...) are + * not published in every AWS region for every model, so we always use the + * `global.` profile — it is published for all three models we support and is + * accessible from any Bedrock-enabled region. + * + * Trade-off: `global.` may cost slightly more per token than a region-pinned + * profile and offers no data-residency guarantee. If a user needs region + * pinning we will need to add a setting and a region→profile lookup. */ -export const getBedrockRegionalPrefix = (region: string): string => { - const prefix = region.split('-')[0]; - switch (prefix) { - case 'us': - case 'eu': - case 'ap': - case 'ca': - case 'sa': - case 'me': - case 'af': - return prefix; - default: - return 'us'; - } +const BEDROCK_INFERENCE_PROFILE_PREFIX = 'global'; + +export const getBedrockRegionalPrefix = (_region: string): string => BEDROCK_INFERENCE_PROFILE_PREFIX; + +/** + * Resolve the Bedrock inference-profile ID used to validate AWS credentials. + * Uses Haiku 4.5 — cheapest of the three. 
+ */ +export const getBedrockValidationModelId = (region: string): string => { + const regionalPrefix = getBedrockRegionalPrefix(region); + return `${regionalPrefix}.${BEDROCK_MODEL_MAP[ANTHROPIC_HAIKU_4_5]}`; }; let cachedAnthropic: ReturnType | null = null; -let cachedBedrock: ReturnType | null = null; let cachedAuthMethod: LoginMethod | null = null; let reLoginPromptInFlight = false; @@ -298,9 +302,14 @@ export const getAnthropicProvider = async (): Promise; + provider: ReturnType; credentials: Awaited> & {}; }> => { const credentials = await getAwsBedrockCredentials(); @@ -308,15 +317,20 @@ const getBedrockProvider = async (): Promise<{ throw new Error("Authentication failed: Unable to get AWS Bedrock credentials"); } - // Always recreate to ensure fresh credentials - cachedBedrock = createAmazonBedrock({ - region: credentials.region, - accessKeyId: credentials.accessKeyId, - secretAccessKey: credentials.secretAccessKey, - sessionToken: credentials.sessionToken, - }); - - return { provider: cachedBedrock, credentials }; + // Always recreate to ensure fresh credentials. + const provider = credentials.authType === 'api_key' + ? createBedrockAnthropic({ + region: credentials.region, + apiKey: credentials.apiKey, + }) + : createBedrockAnthropic({ + region: credentials.region, + accessKeyId: credentials.accessKeyId, + secretAccessKey: credentials.secretAccessKey, + sessionToken: credentials.sessionToken, + }); + + return { provider, credentials }; }; export const getAnthropicClient = async (model: AnthropicModel): Promise => { @@ -358,7 +372,7 @@ export function resolveMainModelId(settings: { mainModelPreset: string; mainMode if (settings.mainModelCustomId) { return settings.mainModelCustomId; } - return settings.mainModelPreset === 'opus' ? ANTHROPIC_OPUS_4_6 : ANTHROPIC_SONNET_4_6; + return settings.mainModelPreset === 'opus' ? 
ANTHROPIC_OPUS_4_7 : ANTHROPIC_SONNET_4_6; } /** @@ -372,13 +386,12 @@ export function resolveSubModelId(settings: { subModelPreset: string; subModelCu } /** - * Returns cache control options for prompt caching - * @returns Cache control options for Anthropic or Bedrock + * Returns cache control options for prompt caching. + * + * Both the direct Anthropic provider and the Bedrock-Anthropic provider + * (`@ai-sdk/amazon-bedrock/anthropic`) speak the Anthropic protocol under the + * hood, so the providerOptions key is always `anthropic` — even on Bedrock. */ export const getProviderCacheControl = async (): Promise> => { - const loginMethod = await getLoginMethod(); - if (loginMethod === LoginMethod.AWS_BEDROCK) { - return { bedrock: { cacheControl: { type: "ephemeral" } } }; - } return { anthropic: { cacheControl: { type: "ephemeral" } } }; }; diff --git a/workspaces/mi/mi-extension/src/ai-features/utils/sanitize-text.ts b/workspaces/mi/mi-extension/src/ai-features/utils/sanitize-text.ts new file mode 100644 index 00000000000..902ccca88ab --- /dev/null +++ b/workspaces/mi/mi-extension/src/ai-features/utils/sanitize-text.ts @@ -0,0 +1,44 @@ +/** + * Copyright (c) 2026, WSO2 LLC. (https://www.wso2.com/) All Rights Reserved. + * + * WSO2 LLC. licenses this file to you under the Apache License, + * Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Strips ANSI escape sequences and stray ASCII control bytes from text + * destined for the Anthropic Messages API. 
+ * + * Why: Maven/Gradle/npm captured-to-file logs embed ANSI color codes + * (ESC + `[...m`). When such text becomes a tool-result value, the + * Copilot proxy fails the request with `unexpected control character in + * string` — JSON.stringify produces a valid `` escape, but the + * upstream JSON parser still rejects raw control chars elsewhere in the + * pipeline. Stripping at the tool boundary removes the bytes before they + * can leak into the wire body. + * + * Preserves \t, \n, \r since those are common in tool output and survive + * JSON serialization without issue. + */ +// eslint-disable-next-line no-control-regex +const ANSI_ESCAPE_RE = /\x1b(?:\[[0-9;?]*[A-Za-z]|\][^\x07\x1b]*(?:\x07|\x1b\\)|[@-Z\\-_])/g; +// eslint-disable-next-line no-control-regex +const STRAY_CONTROL_RE = /[\x00-\x08\x0B\x0C\x0E-\x1F]/g; + +export function stripAnsiAndControl(input: string): string { + if (!input) { + return input; + } + return input.replace(ANSI_ESCAPE_RE, '').replace(STRAY_CONTROL_RE, ''); +} diff --git a/workspaces/mi/mi-extension/src/rpc-managers/agent-mode/rpc-manager.ts b/workspaces/mi/mi-extension/src/rpc-managers/agent-mode/rpc-manager.ts index 1dec93138e5..4546f6f094a 100644 --- a/workspaces/mi/mi-extension/src/rpc-managers/agent-mode/rpc-manager.ts +++ b/workspaces/mi/mi-extension/src/rpc-managers/agent-mode/rpc-manager.ts @@ -812,7 +812,6 @@ export class MIAgentPanelRpcManager implements MIAgentPanelAPI { files: request.files, images: request.images, thinking: request.thinking ?? 
true, - webAccessPreapproved: request.webAccessPreapproved, projectPath: this.projectUri, sessionId: this.currentSessionId || undefined, abortSignal: abortController.signal, diff --git a/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-handler.ts b/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-handler.ts index d1e3cd42aec..8c1b6ec90bd 100644 --- a/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-handler.ts +++ b/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-handler.ts @@ -25,6 +25,8 @@ import { GenerateSuggestionsRequest, GenerateCodeRequest, hasAnthropicApiKey, + getTavilyApiKey, + setTavilyApiKey, isMiCopilotLoggedIn, fetchUsage, generateUnitTest, @@ -51,6 +53,8 @@ export function registerMIAiPanelRpcHandlers(messenger: MessengerAPI, projectUri messenger.onRequest(generateCode, (request: GenerateCodeRequest) => rpcManager.generateCode(request)); messenger.onRequest(abortCodeGeneration, () => rpcManager.abortCodeGeneration()); messenger.onRequest(hasAnthropicApiKey, () => rpcManager.hasAnthropicApiKey()); + messenger.onRequest(getTavilyApiKey, () => rpcManager.getTavilyApiKey()); + messenger.onRequest(setTavilyApiKey, (request: { apiKey: string }) => rpcManager.setTavilyApiKey(request)); messenger.onRequest(isMiCopilotLoggedIn, () => rpcManager.isMiCopilotLoggedIn()); messenger.onRequest(fetchUsage, () => rpcManager.fetchUsage()); diff --git a/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-manager.ts b/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-manager.ts index b1c68563669..c5c8ef1771b 100644 --- a/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-manager.ts +++ b/workspaces/mi/mi-extension/src/rpc-managers/ai-features/rpc-manager.ts @@ -47,7 +47,7 @@ import { MiDiagramRpcManager } from "../mi-diagram/rpc-manager"; import { generateSuggestions as generateSuggestionsFromLLM } from "../../ai-features/copilot/suggestions/suggestions"; import { fillIdpSchema } from 
'../../ai-features/copilot/idp/fill_schema'; import { codeDiagnostics } from "../../ai-features/copilot/diagnostics/diagnostics"; -import { getCopilotUsageApiUrl, getLoginMethod } from '../../ai-features/auth'; +import { getCopilotUsageApiUrl, getLoginMethod, getTavilyApiKey, setTavilyApiKey } from '../../ai-features/auth'; import { LoginMethod } from '@wso2/mi-core'; import { logInfo, logWarn, logError, logDebug } from '../../ai-features/copilot/logger'; import { MILanguageClient } from '../../lang-client/activator'; @@ -635,6 +635,28 @@ export class MIAIPanelRpcManager implements MIAIPanelAPI { return loginMethod === LoginMethod.MI_INTEL; } + /** + * Read the Tavily API key bundled with Bedrock credentials. + * Returns undefined for non-Bedrock auth methods or when the key is unset. + */ + async getTavilyApiKey(): Promise { + return await getTavilyApiKey(); + } + + /** + * Update the Tavily API key on the stored Bedrock credentials. + * Empty string clears the key. Bedrock-only. + */ + async setTavilyApiKey(request: { apiKey: string }): Promise<{ success: boolean; error?: string }> { + try { + await setTavilyApiKey(request.apiKey); + return { success: true }; + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + return { success: false, error: message }; + } + } + /** * Fetches usage information from backend and updates state machine * Only works for MI_INTEL users diff --git a/workspaces/mi/mi-extension/src/rpc-managers/mi-diagram/rpc-manager.ts b/workspaces/mi/mi-extension/src/rpc-managers/mi-diagram/rpc-manager.ts index 87d7ccf806d..c8714962628 100644 --- a/workspaces/mi/mi-extension/src/rpc-managers/mi-diagram/rpc-manager.ts +++ b/workspaces/mi/mi-extension/src/rpc-managers/mi-diagram/rpc-manager.ts @@ -4987,11 +4987,11 @@ ${keyValuesXML}`; async logoutFromMIAccount(): Promise { const confirm = await vscode.window.showWarningMessage( - 'Are you sure you want to logout?', + 'Sign out of WSO2 Integrator Copilot? 
This only clears MI Copilot credentials and keeps your WSO2 platform session active.', { modal: true }, - 'Yes' + 'Sign out' ); - if (confirm === 'Yes') { + if (confirm === 'Sign out') { await logoutFromCopilot(); StateMachineAI.sendEvent(AI_EVENT_TYPE.LOGOUT); } else { diff --git a/workspaces/mi/mi-rpc-client/src/rpc-clients/ai-features/rpc-client.ts b/workspaces/mi/mi-rpc-client/src/rpc-clients/ai-features/rpc-client.ts index e0eb5ae30e9..13dbb205c51 100644 --- a/workspaces/mi/mi-rpc-client/src/rpc-clients/ai-features/rpc-client.ts +++ b/workspaces/mi/mi-rpc-client/src/rpc-clients/ai-features/rpc-client.ts @@ -27,6 +27,8 @@ import { AbortCodeGenerationResponse, abortCodeGeneration, hasAnthropicApiKey, + getTavilyApiKey, + setTavilyApiKey, isMiCopilotLoggedIn, fetchUsage, GenerateUnitTestRequest, @@ -80,6 +82,17 @@ export class MiAiPanelRpcClient implements MIAIPanelAPI { return this._messenger.sendRequest(hasAnthropicApiKey, HOST_EXTENSION); } + // ================================== + // Tavily API Key (Bedrock-only BYOK) + // ================================== + getTavilyApiKey(): Promise { + return this._messenger.sendRequest(getTavilyApiKey, HOST_EXTENSION); + } + + setTavilyApiKey(request: { apiKey: string }): Promise<{ success: boolean; error?: string }> { + return this._messenger.sendRequest(setTavilyApiKey, HOST_EXTENSION, request); + } + // ================================== // MI Copilot Login Status // ================================== diff --git a/workspaces/mi/mi-visualizer/src/views/AIPanel/component/AIChatFooter.tsx b/workspaces/mi/mi-visualizer/src/views/AIPanel/component/AIChatFooter.tsx index 66526bf87c0..757e1e69036 100644 --- a/workspaces/mi/mi-visualizer/src/views/AIPanel/component/AIChatFooter.tsx +++ b/workspaces/mi/mi-visualizer/src/views/AIPanel/component/AIChatFooter.tsx @@ -30,7 +30,6 @@ import Attachments from "./Attachments"; // Tool name constant const SHELL_TOOL_NAMES = new Set(['shell', 'bash']); const EXIT_PLAN_MODE_TOOL_NAME = 
'exit_plan_mode'; -const WEB_ACCESS_PREFERENCE_KEY = 'mi-agent-web-access-enabled'; function appendThinkingPlaceholder(content: string, thinkingId: string): string { return `${content}\n\n`; @@ -70,7 +69,12 @@ function appendThinkingDelta(content: string, thinkingId: string, delta: string) content.includes(``); if (!hasExistingBlock) { - return appendThinkingPlaceholder(content, thinkingId).replace("", `${delta}`); + // Build the new placeholder directly with the delta inside. The previous + // approach (appendThinkingPlaceholder + .replace("", …)) + // matched the FIRST in content, so a delta arriving without + // its start (e.g. during panel reconnect / event replay) would inject + // into a prior finalized block instead of the new one. + return `${content}\n\n${delta}`; } return updateThinkingContent(content, thinkingId, (current) => current + delta); @@ -163,10 +167,6 @@ function getApprovalFallbackContent( return 'Agent recommends entering Plan mode. Do you want to switch now?'; case 'exit_plan_mode_without_plan': return 'Agent wants to exit Plan mode without a full plan. 
Do you want to continue?'; - case 'web_search': - return 'Agent wants permission to run a web search.'; - case 'web_fetch': - return 'Agent wants permission to fetch a web page.'; case 'shell_command': return 'Agent wants permission to run a shell command.'; case 'continue_after_limit': @@ -180,9 +180,6 @@ function getApprovalTitle(approvalKind: PlanApprovalKind | undefined): string { switch (approvalKind) { case 'exit_plan_mode': return 'Plan Approval'; - case 'web_search': - case 'web_fetch': - return 'Web Access Approval'; case 'shell_command': return 'Shell Access Approval'; case 'continue_after_limit': @@ -393,13 +390,6 @@ const AIChatFooter: React.FC = ({ isUsageExceeded = false }) // Mode switcher state // Mode switcher is now a pill group (no dropdown menu needed) - const [isWebAccessEnabled, setIsWebAccessEnabled] = useState(() => { - try { - return localStorage.getItem(WEB_ACCESS_PREFERENCE_KEY) === 'true'; - } catch { - return false; - } - }); // Manual compact state const [isCompacting, setIsCompacting] = useState(false); @@ -1270,7 +1260,6 @@ const AIChatFooter: React.FC = ({ isUsageExceeded = false }) files, images, thinking: isThinkingEnabled, - webAccessPreapproved: isWebAccessEnabled, chatHistory: chatHistory, modelSettings, }); @@ -1359,14 +1348,6 @@ const AIChatFooter: React.FC = ({ isUsageExceeded = false }) setPendingMentionCursorPosition(null); }, [pendingMentionCursorPosition, currentUserPrompt]); - useEffect(() => { - try { - localStorage.setItem(WEB_ACCESS_PREFERENCE_KEY, String(isWebAccessEnabled)); - } catch { - // Ignore localStorage errors in restricted environments - } - }, [isWebAccessEnabled]); - // Set up agent event listener useEffect(() => { if (rpcClient) { @@ -2810,35 +2791,6 @@ const AIChatFooter: React.FC = ({ isUsageExceeded = false }) })} - {/* Web search toggle */} - - - - {/* Context usage indicator — always visible */} = ({ onOpenSettings }) => { /> + + + + )} + + {!tavilyInputOpen && tavilyKey && ( +
+

+ Tavily key saved. +

+ +
+ )} + + {tavilyStatus.kind === 'saved' && ( +

+ {tavilyStatus.message} +

+ )} + {tavilyStatus.kind === 'error' && ( +

+ {tavilyStatus.message} +

+ )} + + + + + )} + {/* Account */}
@@ -186,8 +484,8 @@ const SettingsPanel: React.FC = ({ onClose, isByok }) => {

Sign out

{isByok - ? "End your session and clear credentials" - : "End your session and disconnect from AI services"} + ? "Clear MI Copilot credentials stored by this extension" + : "Sign out of MI Copilot while staying signed in to the WSO2 platform"}

+ {" "}or{" "} + {" "}— optional at login; needed later to enable web_search / web_fetch on AWS Bedrock. You can add or change it in Settings. + + + {displayError && ( + + + {displayError} + + )} + - - Back + + {isValidating ? "Validating..." : "Connect to AWS Bedrock"} + + + Cancel diff --git a/workspaces/mi/mi-visualizer/src/views/AddArtifact/index.tsx b/workspaces/mi/mi-visualizer/src/views/AddArtifact/index.tsx index ca21e06e963..7ad252c6980 100644 --- a/workspaces/mi/mi-visualizer/src/views/AddArtifact/index.tsx +++ b/workspaces/mi/mi-visualizer/src/views/AddArtifact/index.tsx @@ -258,7 +258,7 @@ export function AddArtifactView() { - + Describe your Integration to generate with AI @@ -302,7 +302,7 @@ export function AddArtifactView() { disabled={inputAiPrompt.length === 0} onClick={handleGenerateWithAI} > - +   Generate diff --git a/workspaces/mi/mi-visualizer/src/views/DisabledWindow/index.tsx b/workspaces/mi/mi-visualizer/src/views/DisabledWindow/index.tsx index 21d3c0910d4..80a9d344a1f 100644 --- a/workspaces/mi/mi-visualizer/src/views/DisabledWindow/index.tsx +++ b/workspaces/mi/mi-visualizer/src/views/DisabledWindow/index.tsx @@ -81,10 +81,10 @@ export const DisabledMessage = (props: { showProjectHeader?: boolean }) => { /> @@ -92,7 +92,7 @@ export const DisabledMessage = (props: { showProjectHeader?: boolean }) => { Troubleshooting Guide
  • Check your internet connection
  • -
  • Try logging out and logging in again
  • +
  • Try signing out of MI Copilot and signing in again
  • Try restarting VSCode
  • diff --git a/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/IdpHeaderSchemaGeneration.tsx b/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/IdpHeaderSchemaGeneration.tsx index 622097b3469..2fb67bc1e3d 100644 --- a/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/IdpHeaderSchemaGeneration.tsx +++ b/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/IdpHeaderSchemaGeneration.tsx @@ -17,8 +17,7 @@ */ import styled from "@emotion/styled"; -import { Typography, Button } from "@wso2/ui-toolkit"; -import { Codicon } from "@wso2/ui-toolkit"; +import { Typography, Button, Codicon, Icon } from "@wso2/ui-toolkit"; import React from "react"; const HeaderContainer = styled.div` @@ -72,7 +71,7 @@ export function IdpHeaderSchemaGeneration({ onClick={generateSchema} disabled={isLoading} > - +   Extract Schema diff --git a/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/InitialTryOutView.tsx b/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/InitialTryOutView.tsx index 9ac8adb068e..66aa0cd0b30 100644 --- a/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/InitialTryOutView.tsx +++ b/workspaces/mi/mi-visualizer/src/views/Forms/IDPConnectorForm/InitialTryOutView.tsx @@ -17,7 +17,7 @@ */ import styled from "@emotion/styled"; -import { Codicon,Button,Typography,AutoComplete} from "@wso2/ui-toolkit"; +import { Codicon, Button, Typography, AutoComplete, Icon } from "@wso2/ui-toolkit"; const IconContainer = styled.div` height: 70px; @@ -71,7 +71,7 @@ export function InitialTryOutView({ appearance="primary" onClick={fillSchema} > - +   Tryout diff --git a/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestCaseForm.tsx b/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestCaseForm.tsx index 682c818c37d..532f96221c6 100644 --- a/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestCaseForm.tsx +++ b/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestCaseForm.tsx @@ -641,7 +641,7 @@ export 
function TestCaseForm(props: TestCaseFormProps) { appearance="primary" onClick={() => setShowAIDialog(true)} > -   +   Generate Test Case with AI )} @@ -704,7 +704,7 @@ export function TestCaseForm(props: TestCaseFormProps) { onClick={() => handleAIGeneration()} disabled={!aiPrompt.trim()} > -   +   Generate diff --git a/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestSuiteForm.tsx b/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestSuiteForm.tsx index 16248b4ec6a..f3e0533365c 100644 --- a/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestSuiteForm.tsx +++ b/workspaces/mi/mi-visualizer/src/views/Forms/Tests/TestSuiteForm.tsx @@ -1031,7 +1031,7 @@ export function TestSuiteForm(props: TestSuiteFormProps) { appearance="primary" onClick={handleSubmit(handleCreateUnitTests)} > -   +   Generate Unit Tests with AI