From a248d107da06b0d1fd3fc24cdd3172c3ce2c8c4d Mon Sep 17 00:00:00 2001
From: Ben McMorran
Date: Mon, 21 Oct 2024 15:20:07 -0700
Subject: [PATCH 01/13] Check-in lmTools API to avoid build breaks (#12872)

---
 Extension/.gitignore                   |   4 +
 Extension/package.json                 |   2 +-
 Extension/vscode.proposed.lmTools.d.ts | 407 +++++++++++++++++++++++++
 3 files changed, 412 insertions(+), 1 deletion(-)
 create mode 100644 Extension/vscode.proposed.lmTools.d.ts

diff --git a/Extension/.gitignore b/Extension/.gitignore
index 1adad30d07..06e718e788 100644
--- a/Extension/.gitignore
+++ b/Extension/.gitignore
@@ -35,3 +35,7 @@ src/nativeStrings.ts
 vscode*.d.ts
 
 .scripts/_*
+
+# The lmTools API is still changing frequently. We want to avoid spontaneous
+# build breaks just because the upstream API changed in VS Code Insiders.
+!vscode.proposed.lmTools.d.ts
diff --git a/Extension/package.json b/Extension/package.json
index 182e2ccfc6..bdc0895c0b 100644
--- a/Extension/package.json
+++ b/Extension/package.json
@@ -6512,7 +6512,7 @@
     "translations-generate": "set NODE_OPTIONS=--no-experimental-fetch && gulp translations-generate",
     "translations-import": "gulp translations-import",
     "import-edge-strings": "ts-node -T ./.scripts/import_edge_strings.ts",
-    "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main)",
+    "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main && git checkout -- vscode.proposed.lmTools.d.ts)",
     "build": "yarn prep:dts && echo [Building TypeScript code] && tsc --build tsconfig.json"
   },
   "devDependencies": {
diff --git a/Extension/vscode.proposed.lmTools.d.ts b/Extension/vscode.proposed.lmTools.d.ts
new file mode 100644
index 0000000000..2b40bd7b0b
--- /dev/null
+++ b/Extension/vscode.proposed.lmTools.d.ts
@@ -0,0 +1,407 @@
+/*---------------------------------------------------------------------------------------------
+ * Copyright (c) Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License. See License.txt in the project root for license information.
+ *--------------------------------------------------------------------------------------------*/
+
+// version: 10
+// https://github.com/microsoft/vscode/issues/213274
+
+declare module 'vscode' {
+
+	export namespace lm {
+		/**
+		 * Register a LanguageModelTool. The tool must also be registered in the package.json `languageModelTools` contribution
+		 * point. A registered tool is available in the {@link lm.tools} list for any extension to see. But in order for it to
+		 * be seen by a language model, it must be passed in the list of available tools in {@link LanguageModelChatRequestOptions.tools}.
+		 */
+		export function registerTool<T>(name: string, tool: LanguageModelTool<T>): Disposable;
+
+		/**
+		 * A list of all available tools that were registered by all extensions using {@link lm.registerTool}. They can be called
+		 * with {@link lm.invokeTool} with a set of parameters that match their declared `parametersSchema`.
+		 */
+		export const tools: readonly LanguageModelToolInformation[];
+
+		/**
+		 * Invoke a tool listed in {@link lm.tools} by name with the given parameters.
+		 *
+		 * The caller must pass a {@link LanguageModelToolInvocationOptions.toolInvocationToken}, which comes from
+		 * {@link ChatRequest.toolInvocationToken} when the tool is being invoked by a {@link ChatParticipant}, and
+		 * associates the invocation to a chat session.
+		 *
+		 * The tool will return a {@link LanguageModelToolResult} which contains an array of {@link LanguageModelTextPart} and
+		 * {@link LanguageModelPromptTsxPart}. If the tool caller is using `@vscode/prompt-tsx`, it can incorporate the response
+		 * parts into its prompt using a `ToolResult`. If not, the parts can be passed along to the {@link LanguageModelChat} via
+		 * a User message with a {@link LanguageModelToolResultPart}.
+		 *
+		 * If a chat participant wants to preserve tool results for requests across multiple turns, it can store tool results in
+		 * the {@link ChatResult.metadata} returned from the handler and retrieve them on the next turn from
+		 * {@link ChatResponseTurn.result}.
+		 */
+		export function invokeTool(name: string, options: LanguageModelToolInvocationOptions<object>, token: CancellationToken): Thenable<LanguageModelToolResult>;
+	}
+
+	/**
+	 * A tool that is available to the language model via {@link LanguageModelChatRequestOptions}. A language model uses all the
+	 * properties of this interface to decide which tool to call, and how to call it.
+	 */
+	export interface LanguageModelChatTool {
+		/**
+		 * The name of the tool.
+		 */
+		name: string;
+
+		/**
+		 * The description of the tool.
+		 */
+		description: string;
+
+		/**
+		 * A JSON schema for the parameters this tool accepts.
+		 */
+		parametersSchema?: object;
+	}
+
+	/**
+	 * A tool-calling mode for the language model to use.
+	 */
+	export enum LanguageModelChatToolMode {
+		/**
+		 * The language model can choose to call a tool or generate a message. This is the default.
+		 */
+		Auto = 1,
+
+		/**
+		 * The language model must call one of the provided tools. Note: some models only support a single tool when using this
+		 * mode. TODO@API - do we throw, or just pick the first tool? Or only offer an API that allows callers to pick a single
+		 * tool? Go back to `toolChoice?: string`?
+		 */
+		Required = 2
+	}
+
+	export interface LanguageModelChatRequestOptions {
+
+		/**
+		 * An optional list of tools that are available to the language model. These could be registered tools available via
+		 * {@link lm.tools}, or private tools that are just implemented within the calling extension.
+		 *
+		 * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in
+		 * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool
+		 * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}.
+		 *
+		 * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a
+		 * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}.
+		 */
+		tools?: LanguageModelChatTool[];
+
+		/**
+		 * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default.
+		 */
+		toolMode?: LanguageModelChatToolMode;
+	}
+
+	/**
+	 * A language model response part indicating a tool call, returned from a {@link LanguageModelChatResponse}, and also can be
+	 * included as a content part on a {@link LanguageModelChatMessage}, to represent a previous tool call in a chat request.
+	 */
+	export class LanguageModelToolCallPart {
+		/**
+		 * The name of the tool to call.
+		 */
+		name: string;
+
+		/**
+		 * The ID of the tool call. This is a unique identifier for the tool call within the chat request.
+		 */
+		callId: string;
+
+		/**
+		 * The parameters with which to call the tool.
+		 */
+		parameters: object;
+
+		/**
+		 * Create a new LanguageModelToolCallPart.
+		 */
+		constructor(name: string, callId: string, parameters: object);
+	}
+
+	/**
+	 * A language model response part containing a piece of text, returned from a {@link LanguageModelChatResponse}.
+	 */
+	export class LanguageModelTextPart {
+		/**
+		 * The text content of the part.
+		 */
+		value: string;
+
+		/**
+		 * Construct a text part with the given content.
+		 * @param value The text content of the part.
+		 */
+		constructor(value: string);
+	}
+
+	/**
+	 * A language model response part containing a PromptElementJSON from `@vscode/prompt-tsx`.
+	 * @see {@link LanguageModelToolResult}
+	 */
+	export class LanguageModelPromptTsxPart {
+		/**
+		 * The value of the part.
+		 */
+		value: unknown;
+
+		/**
+		 * The mimeType of this part, exported from the `@vscode/prompt-tsx` library.
+		 */
+		mime: string;
+
+		/**
+		 * Construct a prompt-tsx part with the given content.
+		 * @param value The value of the part, the result of `renderPromptElementJSON` from `@vscode/prompt-tsx`.
+		 * @param mime The mimeType of the part, exported from `@vscode/prompt-tsx` as `contentType`.
+		 */
+		constructor(value: unknown, mime: string);
+	}
+
+	export interface LanguageModelChatResponse {
+		/**
+		 * A stream of parts that make up the response. Could be extended with more types in the future. A
+		 * {@link LanguageModelTextPart} is part of the assistant's response to be shown to the user. A
+		 * {@link LanguageModelToolCallPart} is a request from the language model to call a tool.
+		 */
+		stream: AsyncIterable<LanguageModelTextPart | LanguageModelToolCallPart | unknown>;
+	}
+
+	/**
+	 * The result of a tool call. Can only be included in the content of a User message.
+	 */
+	export class LanguageModelToolResultPart {
+		/**
+		 * The ID of the tool call.
+		 */
+		callId: string;
+
+		/**
+		 * The value of the tool result.
+		 */
+		content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[];
+
+		/**
+		 * @param callId The ID of the tool call.
+		 * @param content The content of the tool result.
+		 */
+		constructor(callId: string, content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]);
+	}
+
+	export interface LanguageModelChatMessage {
+		/**
+		 * A heterogeneous array of other things that a message can contain as content. Some parts may be message-type specific
+		 * for some models.
+		 */
+		content2: (string | LanguageModelToolResultPart | LanguageModelToolCallPart)[];
+	}
+
+	/**
+	 * A result returned from a tool invocation. If using `@vscode/prompt-tsx`, this result may be rendered using a `ToolResult`.
+	 */
+	export class LanguageModelToolResult {
+		/**
+		 * A list of tool result content parts. Includes `unknown` because this list may be extended with new content types in
+		 * the future.
+		 * @see {@link lm.invokeTool}.
+		 */
+		content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[];
+
+		/**
+		 * Create a LanguageModelToolResult
+		 * @param content A list of tool result content parts
+		 */
+		constructor(content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]);
+	}
+
+	/**
+	 * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request.
+	 */
+	export type ChatParticipantToolToken = unknown;
+
+	/**
+	 * Options provided for tool invocation.
+	 */
+	export interface LanguageModelToolInvocationOptions<T> {
+		/**
+		 * When this tool is being invoked by a {@link ChatParticipant} within the context of a chat request, this token should be
+		 * passed from {@link ChatRequest.toolInvocationToken}. In that case, a progress bar will be automatically shown for the
+		 * tool invocation in the chat response view, and if the tool requires user confirmation, it will show up inline in the
+		 * chat view. If the tool is being invoked outside of a chat request, `undefined` should be passed instead.
+		 *
+		 * If a tool invokes another tool during its invocation, it can pass along the `toolInvocationToken` that it received.
+		 */
+		toolInvocationToken: ChatParticipantToolToken | undefined;
+
+		/**
+		 * The parameters with which to invoke the tool. The parameters must match the schema defined in
+		 * {@link LanguageModelToolInformation.parametersSchema}
+		 */
+		parameters: T;
+
+		/**
+		 * Options to hint at how many tokens the tool should return in its response, and enable the tool to count tokens
+		 * accurately.
+		 */
+		tokenizationOptions?: LanguageModelToolTokenizationOptions;
+	}
+
+	/**
+	 * Options related to tokenization for a tool invocation.
+	 */
+	export interface LanguageModelToolTokenizationOptions {
+		/**
+		 * If known, the maximum number of tokens the tool should emit in its result.
+		 */
+		tokenBudget: number;
+
+		/**
+		 * Count the number of tokens in a message using the model specific tokenizer-logic.
+		 * @param text A string.
+		 * @param token Optional cancellation token. See {@link CancellationTokenSource} for how to create one.
+		 * @returns A thenable that resolves to the number of tokens.
+		 */
+		countTokens(text: string, token?: CancellationToken): Thenable<number>;
+	}
+
+	/**
+	 * Information about a registered tool available in {@link lm.tools}.
+	 */
+	export interface LanguageModelToolInformation {
+		/**
+		 * A unique name for the tool.
+		 */
+		readonly name: string;
+
+		/**
+		 * A description of this tool that may be passed to a language model.
+		 */
+		readonly description: string;
+
+		/**
+		 * A JSON schema for the parameters this tool accepts.
+		 */
+		readonly parametersSchema: object | undefined;
+
+		/**
+		 * A set of tags, declared by the tool, that roughly describe the tool's capabilities. A tool user may use these to filter
+		 * the set of tools to just ones that are relevant for the task at hand.
+		 */
+		readonly tags: readonly string[];
+	}
+
+	/**
+	 * When this is returned in {@link PreparedToolInvocation}, the user will be asked to confirm before running the tool. These
+	 * messages will be shown with buttons that say "Continue" and "Cancel".
+	 */
+	export interface LanguageModelToolConfirmationMessages {
+		/**
+		 * The title of the confirmation message.
+		 */
+		title: string;
+
+		/**
+		 * The body of the confirmation message.
+		 */
+		message: string | MarkdownString;
+	}
+
+	/**
+	 * Options for {@link LanguageModelTool.prepareInvocation}.
+	 */
+	export interface LanguageModelToolInvocationPrepareOptions<T> {
+		/**
+		 * The parameters that the tool is being invoked with.
+		 */
+		parameters: T;
+	}
+
+	/**
+	 * A tool that can be invoked by a call to a {@link LanguageModelChat}.
+	 */
+	export interface LanguageModelTool<T> {
+		/**
+		 * Invoke the tool with the given parameters and return a result.
+		 *
+		 * The provided {@link LanguageModelToolInvocationOptions.parameters} are currently not validated against the declared
+		 * schema, but will be in the future.
+		 */
+		invoke(options: LanguageModelToolInvocationOptions<T>, token: CancellationToken): ProviderResult<LanguageModelToolResult>;
+
+		/**
+		 * Called once before a tool is invoked. It's recommended to implement this to customize the progress message that appears
+		 * while the tool is running, and to provide a more useful message with context from the invocation parameters. Can also
+		 * signal that a tool needs user confirmation before running, if appropriate. Must be free of side-effects. A call to
+		 * `prepareInvocation` is not necessarily followed by a call to `invoke`.
+		 */
+		prepareInvocation?(options: LanguageModelToolInvocationPrepareOptions<T>, token: CancellationToken): ProviderResult<PreparedToolInvocation>;
+	}
+
+	/**
+	 * The result of a call to {@link LanguageModelTool.prepareInvocation}.
+	 */
+	export interface PreparedToolInvocation {
+		/**
+		 * A customized progress message to show while the tool runs.
+		 */
+		invocationMessage?: string;
+
+		/**
+		 * The presence of this property indicates that the user should be asked to confirm before running the tool. The user
+		 * should be asked for confirmation for any tool that has a side-effect or may potentially be dangerous.
+		 */
+		confirmationMessages?: LanguageModelToolConfirmationMessages;
+	}
+
+	/**
+	 * A reference to a tool that the user manually attached to their request, either using the `#`-syntax inline, or as an
+	 * attachment via the paperclip button.
+	 */
+	export interface ChatLanguageModelToolReference {
+		/**
+		 * The tool name. Refers to a tool listed in {@link lm.tools}.
+		 */
+		readonly name: string;
+
+		/**
+		 * The start and end index of the reference in the {@link ChatRequest.prompt prompt}. When undefined, the reference was
+		 * not part of the prompt text.
+		 *
+		 * *Note* that the indices take the leading `#`-character into account which means they can be used to modify the prompt
+		 * as-is.
+		 */
+		readonly range?: [start: number, end: number];
+	}
+
+	export interface ChatRequest {
+		/**
+		 * The list of tools that the user attached to their request.
+		 *
+		 * When a tool reference is present, the chat participant should make a chat request using
+		 * {@link LanguageModelChatToolMode.Required} to force the language model to generate parameters for the tool. Then, the
+		 * participant can use {@link lm.invokeTool} to use the tool and attach the result to its request for the user's prompt.
+		 * The tool may contribute useful extra context for the user's request.
+		 */
+		readonly toolReferences: readonly ChatLanguageModelToolReference[];
+
+		/**
+		 * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request.
+		 * This associates the tool invocation to a chat session.
+		 */
+		readonly toolInvocationToken: ChatParticipantToolToken;
+	}
+
+	export interface ChatRequestTurn {
+		/**
+		 * The list of tools that were attached to this request.
+		 */
+		readonly toolReferences?: readonly ChatLanguageModelToolReference[];
+	}
+}
From 1f7f2f77adb457ef2d986cd99c1c977d6b2f2cb9 Mon Sep 17 00:00:00 2001
From: Ben McMorran
Date: Tue, 22 Oct 2024 09:08:31 -0700
Subject: [PATCH 02/13] Switch to stable lmTools API (#12874)

* Revert "Check-in lmTools API to avoid build breaks (#12872)"

This reverts commit a248d107da06b0d1fd3fc24cdd3172c3ce2c8c4d.

* Switch to stable lmTools API

---
 Extension/.gitignore                   |   4 -
 Extension/package.json                 |   5 +-
 Extension/vscode.proposed.lmTools.d.ts | 407 -------------------------
 3 files changed, 2 insertions(+), 414 deletions(-)
 delete mode 100644 Extension/vscode.proposed.lmTools.d.ts

diff --git a/Extension/.gitignore b/Extension/.gitignore
index 06e718e788..1adad30d07 100644
--- a/Extension/.gitignore
+++ b/Extension/.gitignore
@@ -35,7 +35,3 @@ src/nativeStrings.ts
 vscode*.d.ts
 
 .scripts/_*
-
-# The lmTools API is still changing frequently. We want to avoid spontaneous
-# build breaks just because the upstream API changed in VS Code Insiders.
-!vscode.proposed.lmTools.d.ts
diff --git a/Extension/package.json b/Extension/package.json
index bdc0895c0b..cad8ec768c 100644
--- a/Extension/package.json
+++ b/Extension/package.json
@@ -38,8 +38,7 @@
     "Snippets"
   ],
   "enabledApiProposals": [
-    "terminalDataWriteEvent",
-    "lmTools"
+    "terminalDataWriteEvent"
   ],
   "capabilities": {
     "untrustedWorkspaces": {
@@ -6512,7 +6511,7 @@
     "translations-generate": "set NODE_OPTIONS=--no-experimental-fetch && gulp translations-generate",
     "translations-import": "gulp translations-import",
     "import-edge-strings": "ts-node -T ./.scripts/import_edge_strings.ts",
-    "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main && git checkout -- vscode.proposed.lmTools.d.ts)",
+    "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main)",
     "build": "yarn prep:dts && echo [Building TypeScript code] && tsc --build tsconfig.json"
   },
   "devDependencies": {
diff --git a/Extension/vscode.proposed.lmTools.d.ts b/Extension/vscode.proposed.lmTools.d.ts
deleted file mode 100644
index 2b40bd7b0b..0000000000
--- a/Extension/vscode.proposed.lmTools.d.ts
+++ /dev/null
@@ -1,407 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the MIT License. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-// version: 10
-// https://github.com/microsoft/vscode/issues/213274
-
-declare module 'vscode' {
-
-	export namespace lm {
-		/**
-		 * Register a LanguageModelTool. The tool must also be registered in the package.json `languageModelTools` contribution
-		 * point. A registered tool is available in the {@link lm.tools} list for any extension to see. But in order for it to
-		 * be seen by a language model, it must be passed in the list of available tools in {@link LanguageModelChatRequestOptions.tools}.
-		 */
-		export function registerTool<T>(name: string, tool: LanguageModelTool<T>): Disposable;
-
-		/**
-		 * A list of all available tools that were registered by all extensions using {@link lm.registerTool}. They can be called
-		 * with {@link lm.invokeTool} with a set of parameters that match their declared `parametersSchema`.
-		 */
-		export const tools: readonly LanguageModelToolInformation[];
-
-		/**
-		 * Invoke a tool listed in {@link lm.tools} by name with the given parameters.
-		 *
-		 * The caller must pass a {@link LanguageModelToolInvocationOptions.toolInvocationToken}, which comes from
-		 * {@link ChatRequest.toolInvocationToken} when the tool is being invoked by a {@link ChatParticipant}, and
-		 * associates the invocation to a chat session.
-		 *
-		 * The tool will return a {@link LanguageModelToolResult} which contains an array of {@link LanguageModelTextPart} and
-		 * {@link LanguageModelPromptTsxPart}. If the tool caller is using `@vscode/prompt-tsx`, it can incorporate the response
-		 * parts into its prompt using a `ToolResult`. If not, the parts can be passed along to the {@link LanguageModelChat} via
-		 * a User message with a {@link LanguageModelToolResultPart}.
-		 *
-		 * If a chat participant wants to preserve tool results for requests across multiple turns, it can store tool results in
-		 * the {@link ChatResult.metadata} returned from the handler and retrieve them on the next turn from
-		 * {@link ChatResponseTurn.result}.
-		 */
-		export function invokeTool(name: string, options: LanguageModelToolInvocationOptions<object>, token: CancellationToken): Thenable<LanguageModelToolResult>;
-	}
-
-	/**
-	 * A tool that is available to the language model via {@link LanguageModelChatRequestOptions}. A language model uses all the
-	 * properties of this interface to decide which tool to call, and how to call it.
-	 */
-	export interface LanguageModelChatTool {
-		/**
-		 * The name of the tool.
-		 */
-		name: string;
-
-		/**
-		 * The description of the tool.
-		 */
-		description: string;
-
-		/**
-		 * A JSON schema for the parameters this tool accepts.
-		 */
-		parametersSchema?: object;
-	}
-
-	/**
-	 * A tool-calling mode for the language model to use.
-	 */
-	export enum LanguageModelChatToolMode {
-		/**
-		 * The language model can choose to call a tool or generate a message. This is the default.
-		 */
-		Auto = 1,
-
-		/**
-		 * The language model must call one of the provided tools. Note: some models only support a single tool when using this
-		 * mode. TODO@API - do we throw, or just pick the first tool? Or only offer an API that allows callers to pick a single
-		 * tool? Go back to `toolChoice?: string`?
-		 */
-		Required = 2
-	}
-
-	export interface LanguageModelChatRequestOptions {
-
-		/**
-		 * An optional list of tools that are available to the language model. These could be registered tools available via
-		 * {@link lm.tools}, or private tools that are just implemented within the calling extension.
-		 *
-		 * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in
-		 * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool
-		 * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}.
-		 *
-		 * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a
-		 * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}.
-		 */
-		tools?: LanguageModelChatTool[];
-
-		/**
-		 * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default.
-		 */
-		toolMode?: LanguageModelChatToolMode;
-	}
-
-	/**
-	 * A language model response part indicating a tool call, returned from a {@link LanguageModelChatResponse}, and also can be
-	 * included as a content part on a {@link LanguageModelChatMessage}, to represent a previous tool call in a chat request.
-	 */
-	export class LanguageModelToolCallPart {
-		/**
-		 * The name of the tool to call.
-		 */
-		name: string;
-
-		/**
-		 * The ID of the tool call. This is a unique identifier for the tool call within the chat request.
-		 */
-		callId: string;
-
-		/**
-		 * The parameters with which to call the tool.
-		 */
-		parameters: object;
-
-		/**
-		 * Create a new LanguageModelToolCallPart.
-		 */
-		constructor(name: string, callId: string, parameters: object);
-	}
-
-	/**
-	 * A language model response part containing a piece of text, returned from a {@link LanguageModelChatResponse}.
-	 */
-	export class LanguageModelTextPart {
-		/**
-		 * The text content of the part.
-		 */
-		value: string;
-
-		/**
-		 * Construct a text part with the given content.
-		 * @param value The text content of the part.
-		 */
-		constructor(value: string);
-	}
-
-	/**
-	 * A language model response part containing a PromptElementJSON from `@vscode/prompt-tsx`.
-	 * @see {@link LanguageModelToolResult}
-	 */
-	export class LanguageModelPromptTsxPart {
-		/**
-		 * The value of the part.
-		 */
-		value: unknown;
-
-		/**
-		 * The mimeType of this part, exported from the `@vscode/prompt-tsx` library.
-		 */
-		mime: string;
-
-		/**
-		 * Construct a prompt-tsx part with the given content.
-		 * @param value The value of the part, the result of `renderPromptElementJSON` from `@vscode/prompt-tsx`.
-		 * @param mime The mimeType of the part, exported from `@vscode/prompt-tsx` as `contentType`.
-		 */
-		constructor(value: unknown, mime: string);
-	}
-
-	export interface LanguageModelChatResponse {
-		/**
-		 * A stream of parts that make up the response. Could be extended with more types in the future. A
-		 * {@link LanguageModelTextPart} is part of the assistant's response to be shown to the user. A
-		 * {@link LanguageModelToolCallPart} is a request from the language model to call a tool.
-		 */
-		stream: AsyncIterable<LanguageModelTextPart | LanguageModelToolCallPart | unknown>;
-	}
-
-	/**
-	 * The result of a tool call. Can only be included in the content of a User message.
-	 */
-	export class LanguageModelToolResultPart {
-		/**
-		 * The ID of the tool call.
-		 */
-		callId: string;
-
-		/**
-		 * The value of the tool result.
-		 */
-		content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[];
-
-		/**
-		 * @param callId The ID of the tool call.
-		 * @param content The content of the tool result.
-		 */
-		constructor(callId: string, content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]);
-	}
-
-	export interface LanguageModelChatMessage {
-		/**
-		 * A heterogeneous array of other things that a message can contain as content. Some parts may be message-type specific
-		 * for some models.
-		 */
-		content2: (string | LanguageModelToolResultPart | LanguageModelToolCallPart)[];
-	}
-
-	/**
-	 * A result returned from a tool invocation. If using `@vscode/prompt-tsx`, this result may be rendered using a `ToolResult`.
-	 */
-	export class LanguageModelToolResult {
-		/**
-		 * A list of tool result content parts. Includes `unknown` because this list may be extended with new content types in
-		 * the future.
-		 * @see {@link lm.invokeTool}.
-		 */
-		content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[];
-
-		/**
-		 * Create a LanguageModelToolResult
-		 * @param content A list of tool result content parts
-		 */
-		constructor(content: (LanguageModelTextPart | LanguageModelPromptTsxPart | unknown)[]);
-	}
-
-	/**
-	 * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request.
-	 */
-	export type ChatParticipantToolToken = unknown;
-
-	/**
-	 * Options provided for tool invocation.
-	 */
-	export interface LanguageModelToolInvocationOptions<T> {
-		/**
-		 * When this tool is being invoked by a {@link ChatParticipant} within the context of a chat request, this token should be
-		 * passed from {@link ChatRequest.toolInvocationToken}. In that case, a progress bar will be automatically shown for the
-		 * tool invocation in the chat response view, and if the tool requires user confirmation, it will show up inline in the
-		 * chat view. If the tool is being invoked outside of a chat request, `undefined` should be passed instead.
-		 *
-		 * If a tool invokes another tool during its invocation, it can pass along the `toolInvocationToken` that it received.
-		 */
-		toolInvocationToken: ChatParticipantToolToken | undefined;
-
-		/**
-		 * The parameters with which to invoke the tool. The parameters must match the schema defined in
-		 * {@link LanguageModelToolInformation.parametersSchema}
-		 */
-		parameters: T;
-
-		/**
-		 * Options to hint at how many tokens the tool should return in its response, and enable the tool to count tokens
-		 * accurately.
-		 */
-		tokenizationOptions?: LanguageModelToolTokenizationOptions;
-	}
-
-	/**
-	 * Options related to tokenization for a tool invocation.
-	 */
-	export interface LanguageModelToolTokenizationOptions {
-		/**
-		 * If known, the maximum number of tokens the tool should emit in its result.
-		 */
-		tokenBudget: number;
-
-		/**
-		 * Count the number of tokens in a message using the model specific tokenizer-logic.
-		 * @param text A string.
-		 * @param token Optional cancellation token. See {@link CancellationTokenSource} for how to create one.
-		 * @returns A thenable that resolves to the number of tokens.
-		 */
-		countTokens(text: string, token?: CancellationToken): Thenable<number>;
-	}
-
-	/**
-	 * Information about a registered tool available in {@link lm.tools}.
-	 */
-	export interface LanguageModelToolInformation {
-		/**
-		 * A unique name for the tool.
-		 */
-		readonly name: string;
-
-		/**
-		 * A description of this tool that may be passed to a language model.
-		 */
-		readonly description: string;
-
-		/**
-		 * A JSON schema for the parameters this tool accepts.
-		 */
-		readonly parametersSchema: object | undefined;
-
-		/**
-		 * A set of tags, declared by the tool, that roughly describe the tool's capabilities. A tool user may use these to filter
-		 * the set of tools to just ones that are relevant for the task at hand.
-		 */
-		readonly tags: readonly string[];
-	}
-
-	/**
-	 * When this is returned in {@link PreparedToolInvocation}, the user will be asked to confirm before running the tool. These
-	 * messages will be shown with buttons that say "Continue" and "Cancel".
-	 */
-	export interface LanguageModelToolConfirmationMessages {
-		/**
-		 * The title of the confirmation message.
-		 */
-		title: string;
-
-		/**
-		 * The body of the confirmation message.
-		 */
-		message: string | MarkdownString;
-	}
-
-	/**
-	 * Options for {@link LanguageModelTool.prepareInvocation}.
-	 */
-	export interface LanguageModelToolInvocationPrepareOptions<T> {
-		/**
-		 * The parameters that the tool is being invoked with.
-		 */
-		parameters: T;
-	}
-
-	/**
-	 * A tool that can be invoked by a call to a {@link LanguageModelChat}.
-	 */
-	export interface LanguageModelTool<T> {
-		/**
-		 * Invoke the tool with the given parameters and return a result.
-		 *
-		 * The provided {@link LanguageModelToolInvocationOptions.parameters} are currently not validated against the declared
-		 * schema, but will be in the future.
-		 */
-		invoke(options: LanguageModelToolInvocationOptions<T>, token: CancellationToken): ProviderResult<LanguageModelToolResult>;
-
-		/**
-		 * Called once before a tool is invoked. It's recommended to implement this to customize the progress message that appears
-		 * while the tool is running, and to provide a more useful message with context from the invocation parameters. Can also
-		 * signal that a tool needs user confirmation before running, if appropriate. Must be free of side-effects. A call to
-		 * `prepareInvocation` is not necessarily followed by a call to `invoke`.
-		 */
-		prepareInvocation?(options: LanguageModelToolInvocationPrepareOptions<T>, token: CancellationToken): ProviderResult<PreparedToolInvocation>;
-	}
-
-	/**
-	 * The result of a call to {@link LanguageModelTool.prepareInvocation}.
-	 */
-	export interface PreparedToolInvocation {
-		/**
-		 * A customized progress message to show while the tool runs.
-		 */
-		invocationMessage?: string;
-
-		/**
-		 * The presence of this property indicates that the user should be asked to confirm before running the tool. The user
-		 * should be asked for confirmation for any tool that has a side-effect or may potentially be dangerous.
-		 */
-		confirmationMessages?: LanguageModelToolConfirmationMessages;
-	}
-
-	/**
-	 * A reference to a tool that the user manually attached to their request, either using the `#`-syntax inline, or as an
-	 * attachment via the paperclip button.
-	 */
-	export interface ChatLanguageModelToolReference {
-		/**
-		 * The tool name. Refers to a tool listed in {@link lm.tools}.
-		 */
-		readonly name: string;
-
-		/**
-		 * The start and end index of the reference in the {@link ChatRequest.prompt prompt}. When undefined, the reference was
-		 * not part of the prompt text.
-		 *
-		 * *Note* that the indices take the leading `#`-character into account which means they can be used to modify the prompt
-		 * as-is.
-		 */
-		readonly range?: [start: number, end: number];
-	}
-
-	export interface ChatRequest {
-		/**
-		 * The list of tools that the user attached to their request.
-		 *
-		 * When a tool reference is present, the chat participant should make a chat request using
-		 * {@link LanguageModelChatToolMode.Required} to force the language model to generate parameters for the tool. Then, the
-		 * participant can use {@link lm.invokeTool} to use the tool and attach the result to its request for the user's prompt.
-		 * The tool may contribute useful extra context for the user's request.
-		 */
-		readonly toolReferences: readonly ChatLanguageModelToolReference[];
-
-		/**
-		 * A token that can be passed to {@link lm.invokeTool} when invoking a tool inside the context of handling a chat request.
-		 * This associates the tool invocation to a chat session.
-		 */
-		readonly toolInvocationToken: ChatParticipantToolToken;
-	}
-
-	export interface ChatRequestTurn {
-		/**
-		 * The list of tools that were attached to this request.
- */ - readonly toolReferences?: readonly ChatLanguageModelToolReference[]; - } -} From e58e96369f7bc3e0fb6dfb95a689cd107cf0fc2b Mon Sep 17 00:00:00 2001 From: Colen Garoutte-Carson <49173979+Colengms@users.noreply.github.com> Date: Tue, 22 Oct 2024 15:31:38 -0700 Subject: [PATCH 03/13] Prevent redundant progressive squiggle updates (#12876) --- Extension/src/LanguageServer/client.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Extension/src/LanguageServer/client.ts b/Extension/src/LanguageServer/client.ts index 04dcaba701..7d4d81d5ad 100644 --- a/Extension/src/LanguageServer/client.ts +++ b/Extension/src/LanguageServer/client.ts @@ -2387,7 +2387,9 @@ export class DefaultClient implements Client { } this.updateInactiveRegions(intelliSenseResult.uri, intelliSenseResult.inactiveRegions, intelliSenseResult.clearExistingInactiveRegions, intelliSenseResult.isCompletePass); - this.updateSquiggles(intelliSenseResult.uri, intelliSenseResult.diagnostics, intelliSenseResult.clearExistingDiagnostics); + if (intelliSenseResult.clearExistingDiagnostics || intelliSenseResult.diagnostics.length > 0) { + this.updateSquiggles(intelliSenseResult.uri, intelliSenseResult.diagnostics, intelliSenseResult.clearExistingDiagnostics); + } } private updateSquiggles(uriString: string, diagnostics: IntelliSenseDiagnostic[], startNewSet: boolean): void { From 5416e796220500e984b71ab622bc781a16187098 Mon Sep 17 00:00:00 2001 From: Bob Brown Date: Thu, 24 Oct 2024 13:42:43 -0700 Subject: [PATCH 04/13] Values set in settings are not immediately available for use (#12881) --- Extension/src/LanguageServer/client.ts | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Extension/src/LanguageServer/client.ts b/Extension/src/LanguageServer/client.ts index 7d4d81d5ad..e46743815e 100644 --- a/Extension/src/LanguageServer/client.ts +++ b/Extension/src/LanguageServer/client.ts @@ -1046,7 +1046,7 @@ export class DefaultClient implements Client { if (index === paths.length - 1) { action = "disable"; settings.defaultCompilerPath = ""; - await this.configuration.updateCompilerPathIfSet(settings.defaultCompilerPath); + await this.configuration.updateCompilerPathIfSet(""); configurationSelected = true; await this.showPrompt(sender); return ui.ShowConfigureIntelliSenseButton(false, this, ConfigurationType.CompilerPath, "disablePrompt"); @@ -1066,7 +1066,7 @@ export class DefaultClient implements Client { configurationSelected = true; action = "compiler browsed"; settings.defaultCompilerPath = result[0].fsPath; - await this.configuration.updateCompilerPathIfSet(settings.defaultCompilerPath); + await this.configuration.updateCompilerPathIfSet(result[0].fsPath); void SessionState.trustedCompilerFound.set(true); } else { configurationSelected = true; @@ -1084,8 +1084,9 @@ export class DefaultClient implements Client { return ui.ShowConfigureIntelliSenseButton(false, this, ConfigurationType.CompileCommands, showButtonSender); } else { action = "select compiler"; - settings.defaultCompilerPath = util.isCl(paths[index]) ? "cl.exe" : paths[index]; - await this.configuration.updateCompilerPathIfSet(settings.defaultCompilerPath); + const newCompiler: string = util.isCl(paths[index]) ? 
"cl.exe" : paths[index]; + settings.defaultCompilerPath = newCompiler; + await this.configuration.updateCompilerPathIfSet(newCompiler); void SessionState.trustedCompilerFound.set(true); } } From 8cb1deff7602b844ec35a134fecc2d91ba42c851 Mon Sep 17 00:00:00 2001 From: Sean McManus Date: Thu, 24 Oct 2024 13:58:32 -0700 Subject: [PATCH 05/13] Update changelog for 1.23.0 (#12880) * Update changelog for 1.23.0. --- Extension/CHANGELOG.md | 19 ++++++++++++++++++- Extension/package.json | 2 +- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/Extension/CHANGELOG.md b/Extension/CHANGELOG.md index 8891897fa2..606fb27f54 100644 --- a/Extension/CHANGELOG.md +++ b/Extension/CHANGELOG.md @@ -1,11 +1,28 @@ # C/C++ for Visual Studio Code Changelog +## Version 1.23.0: October 24, 2024 +### Enhancements +* Update to clang-format and clang-tidy 19.1.2. [#12824](https://github.com/microsoft/vscode-cpptools/issues/12824) + +### Bug Fixes +* Fix some translation issues. [#7824](https://github.com/microsoft/vscode-cpptools/issues/7824), [#12439](https://github.com/microsoft/vscode-cpptools/issues/12439), [#12440](https://github.com/microsoft/vscode-cpptools/issues/12440), [#12441](https://github.com/microsoft/vscode-cpptools/issues/12441) +* Fix a bug with 'Select IntelliSense Configuration'. [#12705](https://github.com/microsoft/vscode-cpptools/issues/12705) +* Fix newlines being removed from hover markdown code blocks. [#12794](https://github.com/microsoft/vscode-cpptools/issues/12794) +* Fix clang-format using `-` instead of `--` args. [#12819](https://github.com/microsoft/vscode-cpptools/issues/12819) +* Fix processing of `compile_commands.json` generated by the clang `-MJ` option. [#12837](https://github.com/microsoft/vscode-cpptools/issues/12837) +* Fix handling of `-I` and `-isystem` with the same path. [#12842](https://github.com/microsoft/vscode-cpptools/issues/12842) +* Fix stale colorization due to delays in updating the open file version. [PR #12851](https://github.com/microsoft/vscode-cpptools/pull/12851) +* Fix redundant progressive squiggle updates. [PR #12876](https://github.com/microsoft/vscode-cpptools/pull/12876) +* Fix inactive regions with multi-byte UTF-8 characters. [#12879](https://github.com/microsoft/vscode-cpptools/issues/12879) +* Fix some duplicate requests potentially not getting discarded. +* Fix a random crash in `start_process_and_wait_for_exit`. + ## Version 1.22.10: October 21, 2024 ### Bug Fixes * Fix the 'Extract to Function' feature not working. * Fix the 'Go to Next/Prev Preprocessor Conditional' feature not working. -## Version 1.22.9: October 10, 2024 +## Version 1.22.9: October 14, 2024 ### Performance Improvements * Initialization performance improvements. [#12030](https://github.com/microsoft/vscode-cpptools/issues/12030) - Some processing is parallelized and started earlier (populating the filename cache, discovering files). 
[#11954](https://github.com/microsoft/vscode-cpptools/issues/11954), [#12169](https://github.com/microsoft/vscode-cpptools/issues/12169)
diff --git a/Extension/package.json b/Extension/package.json
index cad8ec768c..97e425dcd0 100644
--- a/Extension/package.json
+++ b/Extension/package.json
@@ -2,7 +2,7 @@
   "name": "cpptools",
   "displayName": "C/C++",
   "description": "C/C++ IntelliSense, debugging, and code browsing.",
-  "version": "1.22.9-main",
+  "version": "1.23.0-main",
   "publisher": "ms-vscode",
   "icon": "LanguageCCPP_color_128x.png",
   "readme": "README.md",
From 4138750b3b1e30cff35344d78bf9a1b4e89c9b8f Mon Sep 17 00:00:00 2001
From: Ben McMorran
Date: Tue, 29 Oct 2024 12:05:17 -0700
Subject: [PATCH 06/13] Enable #cpp for all users (#12898)

* Enable #cpp for all users

* Address PR comments
---
 Extension/src/Debugger/configurationProvider.ts |  2 +-
 Extension/src/LanguageServer/extension.ts       | 12 +++++++++---
 Extension/src/common.ts                         |  4 ++++
 3 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/Extension/src/Debugger/configurationProvider.ts b/Extension/src/Debugger/configurationProvider.ts
index 2b9ce38ac2..067983feac 100644
--- a/Extension/src/Debugger/configurationProvider.ts
+++ b/Extension/src/Debugger/configurationProvider.ts
@@ -331,7 +331,7 @@ export class DebugConfigurationProvider implements vscode.DebugConfigurationProv
 
         // Run deploy steps
         if (config.deploySteps && config.deploySteps.length !== 0) {
-            const codeVersion: number[] = vscode.version.split('.').map(num => parseInt(num, undefined));
+            const codeVersion: number[] = util.getVsCodeVersion();
             if ((util.isNumber(codeVersion[0]) && codeVersion[0] < 1) || (util.isNumber(codeVersion[0]) && codeVersion[0] === 1 && util.isNumber(codeVersion[1]) && codeVersion[1] < 69)) {
                 void logger.getOutputChannelLogger().showErrorMessage(localize("vs.code.1.69+.required", "'deploySteps' require VS Code 1.69+."));
                 return undefined;
diff --git a/Extension/src/LanguageServer/extension.ts b/Extension/src/LanguageServer/extension.ts
index c70abfb26b..02dd3e8861 100644
--- a/Extension/src/LanguageServer/extension.ts
+++ b/Extension/src/LanguageServer/extension.ts
@@ -252,9 +252,15 @@ export async function activate(): Promise<void> {
         activeDocument = activeEditor.document;
     }
 
-    if (util.extensionContext && new CppSettings().experimentalFeatures) {
-        const tool = vscode.lm.registerTool('cpptools-lmtool-configuration', new CppConfigurationLanguageModelTool());
-        disposables.push(tool);
+    if (util.extensionContext) {
+        // lmTools wasn't stabilized until 1.95, but (as of October 2024)
+        // cpptools can be installed on older versions of VS Code.
See + // https://github.com/microsoft/vscode-cpptools/blob/main/Extension/package.json#L14 + const version = util.getVsCodeVersion(); + if (version[0] > 1 || (version[0] === 1 && version[1] >= 95)) { + const tool = vscode.lm.registerTool('cpptools-lmtool-configuration', new CppConfigurationLanguageModelTool()); + disposables.push(tool); + } } await registerRelatedFilesProvider(); diff --git a/Extension/src/common.ts b/Extension/src/common.ts index c962a719cd..dbd0bcdd63 100644 --- a/Extension/src/common.ts +++ b/Extension/src/common.ts @@ -1814,3 +1814,7 @@ export function findExePathInArgs(args: CommandString[]): string | undefined { return undefined; } + +export function getVsCodeVersion(): number[] { + return vscode.version.split('.').map(num => parseInt(num, undefined)); +} From d56740e4989f61c105424988d4db8f083c642e0b Mon Sep 17 00:00:00 2001 From: Sean McManus Date: Tue, 29 Oct 2024 12:40:07 -0700 Subject: [PATCH 07/13] Update changelog (2nd time). (#12899) * Update changelog (2nd time). --- Extension/CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Extension/CHANGELOG.md b/Extension/CHANGELOG.md index 606fb27f54..d00d6843ef 100644 --- a/Extension/CHANGELOG.md +++ b/Extension/CHANGELOG.md @@ -1,8 +1,9 @@ # C/C++ for Visual Studio Code Changelog -## Version 1.23.0: October 24, 2024 +## Version 1.23.0: October 29, 2024 ### Enhancements * Update to clang-format and clang-tidy 19.1.2. [#12824](https://github.com/microsoft/vscode-cpptools/issues/12824) +* Enable `#cpp` with GitHub Copilot chat without `C_Cpp.experimentalFeatures` enabled. [PR #12898](https://github.com/microsoft/vscode-cpptools/pull/12898) ### Bug Fixes * Fix some translation issues. [#7824](https://github.com/microsoft/vscode-cpptools/issues/7824), [#12439](https://github.com/microsoft/vscode-cpptools/issues/12439), [#12440](https://github.com/microsoft/vscode-cpptools/issues/12440), [#12441](https://github.com/microsoft/vscode-cpptools/issues/12441) From f51404d9dbd11c67844583099833be943aef261d Mon Sep 17 00:00:00 2001 From: Sean McManus Date: Tue, 29 Oct 2024 17:57:47 -0700 Subject: [PATCH 08/13] Switch to @vscode/dts. 
(#12901) --- Extension/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Extension/package.json b/Extension/package.json index 97e425dcd0..f183a39f5c 100644 --- a/Extension/package.json +++ b/Extension/package.json @@ -6511,7 +6511,7 @@ "translations-generate": "set NODE_OPTIONS=--no-experimental-fetch && gulp translations-generate", "translations-import": "gulp translations-import", "import-edge-strings": "ts-node -T ./.scripts/import_edge_strings.ts", - "prep:dts": "yarn verify dts --quiet || (npx vscode-dts dev && npx vscode-dts main)", + "prep:dts": "yarn verify dts --quiet || (npx @vscode/dts dev && npx @vscode/dts main)", "build": "yarn prep:dts && echo [Building TypeScript code] && tsc --build tsconfig.json" }, "devDependencies": { From e45dbcec9bbef37cc1ecec0a38c9e34a612631aa Mon Sep 17 00:00:00 2001 From: Colen Garoutte-Carson <49173979+Colengms@users.noreply.github.com> Date: Tue, 29 Oct 2024 18:46:13 -0700 Subject: [PATCH 09/13] Fix issue with requests in protocolFilter.ts causing stalls (#12906) --- .../src/LanguageServer/protocolFilter.ts | 62 ++++++------------- 1 file changed, 18 insertions(+), 44 deletions(-) diff --git a/Extension/src/LanguageServer/protocolFilter.ts b/Extension/src/LanguageServer/protocolFilter.ts index 4dddbbcc22..b292c67b16 100644 --- a/Extension/src/LanguageServer/protocolFilter.ts +++ b/Extension/src/LanguageServer/protocolFilter.ts @@ -8,6 +8,7 @@ import * as path from 'path'; import * as vscode from 'vscode'; import { Middleware } from 'vscode-languageclient'; import * as util from '../common'; +import { logAndReturn } from '../Utility/Async/returns'; import { Client } from './client'; import { clients } from './extension'; import { shouldChangeFromCToCpp } from './utils'; @@ -18,14 +19,8 @@ export const ServerCancelled: number = -32802; let anyFileOpened: boolean = false; export function createProtocolFilter(): Middleware { - // Disabling lint for invoke handlers - const invoke1 = (a: any, next: (a: any) => any): any => clients.ActiveClient.enqueue(() => next(a)); - const invoke2 = (a: any, b: any, next: (a: any, b: any) => any): any => clients.ActiveClient.enqueue(() => next(a, b)); - const invoke3 = (a: any, b: any, c: any, next: (a: any, b: any, c: any) => any): any => clients.ActiveClient.enqueue(() => next(a, b, c)); - const invoke4 = (a: any, b: any, c: any, d: any, next: (a: any, b: any, c: any, d: any) => any): any => clients.ActiveClient.enqueue(() => next(a, b, c, d)); - return { - didOpen: async (document, sendMessage) => clients.ActiveClient.enqueue(async () => { + didOpen: async (document, sendMessage) => { if (!util.isCpp(document)) { return; } @@ -41,62 +36,41 @@ export function createProtocolFilter(): Middleware { const mappingString: string = baseFileName + "@" + document.fileName; client.addFileAssociations(mappingString, "cpp"); client.sendDidChangeSettings(); - document = await vscode.languages.setTextDocumentLanguage(document, "cpp"); + // This will cause the file to be closed and reopened. + void vscode.languages.setTextDocumentLanguage(document, "cpp"); + return; } // client.takeOwnership() will call client.TrackedDocuments.add() again, but that's ok. It's a Set. client.onDidOpenTextDocument(document); client.takeOwnership(document); - await sendMessage(document); - - // For a file already open when we activate, sometimes we don't get any notifications about visible - // or active text editors, visible ranges, or text selection. 
As a workaround, we trigger - // onDidChangeVisibleTextEditors here, only for the first file opened. - if (!anyFileOpened) { - anyFileOpened = true; - const cppEditors: vscode.TextEditor[] = vscode.window.visibleTextEditors.filter(e => util.isCpp(e.document)); - await client.onDidChangeVisibleTextEditors(cppEditors); - } + void sendMessage(document).then(() => { + // For a file already open when we activate, sometimes we don't get any notifications about visible + // or active text editors, visible ranges, or text selection. As a workaround, we trigger + // onDidChangeVisibleTextEditors here, only for the first file opened. + if (!anyFileOpened) { + anyFileOpened = true; + const cppEditors: vscode.TextEditor[] = vscode.window.visibleTextEditors.filter(e => util.isCpp(e.document)); + client.onDidChangeVisibleTextEditors(cppEditors).catch(logAndReturn.undefined); + } + }); } } - }), - didChange: invoke1, - willSave: invoke1, + }, willSaveWaitUntil: async (event, sendMessage) => { - // await clients.ActiveClient.ready; - // Don't use awaitUntilLanguageClientReady. - // Otherwise, the message can be delayed too long. const me: Client = clients.getClientFor(event.document.uri); if (me.TrackedDocuments.has(event.document.uri.toString())) { return sendMessage(event); } return []; }, - didSave: invoke1, - didClose: async (document, sendMessage) => clients.ActiveClient.enqueue(async () => { + didClose: async (document, sendMessage) => { const me: Client = clients.getClientFor(document.uri); const uriString: string = document.uri.toString(); if (me.TrackedDocuments.has(uriString)) { me.onDidCloseTextDocument(document); me.TrackedDocuments.delete(uriString); - await sendMessage(document); - } - }), - provideCompletionItem: invoke4, - resolveCompletionItem: invoke2, - provideHover: async (document, position, token, next: (document: any, position: any, token: any) => any) => clients.ActiveClient.enqueue(async () => { - const me: Client = clients.getClientFor(document.uri); - if (me.TrackedDocuments.has(document.uri.toString())) { - return next(document, position, token); + void sendMessage(document); } - return null; - }), - provideSignatureHelp: invoke4, - provideDefinition: invoke3, - provideReferences: invoke4, - provideDocumentHighlights: invoke3, - provideDeclaration: invoke3, - workspace: { - didChangeConfiguration: invoke1 } }; } From 1462b0dacb09060edadb54798c066f54b1f695d7 Mon Sep 17 00:00:00 2001 From: Bob Brown Date: Wed, 30 Oct 2024 10:54:22 -0700 Subject: [PATCH 10/13] Normalize messages for bad paths (#12904) --- Extension/src/LanguageServer/configurations.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Extension/src/LanguageServer/configurations.ts b/Extension/src/LanguageServer/configurations.ts index 3a02622b0a..3339514cc2 100644 --- a/Extension/src/LanguageServer/configurations.ts +++ b/Extension/src/LanguageServer/configurations.ts @@ -1948,7 +1948,7 @@ export class CppProperties { compilerPath = checkPathExists.path; } if (!compilerPathExists) { - compilerMessage = localize('cannot.find2', "Cannot find \"{0}\".", compilerPath); + compilerMessage = localize('cannot.find', "Cannot find: {0}", compilerPath); newSquiggleMetrics.PathNonExistent++; } if (compilerMessage) { @@ -1975,7 +1975,7 @@ export class CppProperties { dotConfigPath = checkPathExists.path; } if (!dotConfigPathExists) { - dotConfigMessage = localize('cannot.find2', "Cannot find \"{0}\".", dotConfigPath); + dotConfigMessage = localize('cannot.find', "Cannot find: {0}", dotConfigPath); 
                    newSquiggleMetrics.PathNonExistent++;
                } else if (dotConfigPath && !util.checkFileExistsSync(dotConfigPath)) {
                    dotConfigMessage = localize("path.is.not.a.file", "Path is not a file: {0}", dotConfigPath);
@@ -2083,7 +2083,7 @@
                     } else {
                         badPath = `"${expandedPaths[0]}"`;
                     }
-                    message = localize('cannot.find2', "Cannot find {0}", badPath);
+                    message = localize('cannot.find', "Cannot find: {0}", badPath);
                     newSquiggleMetrics.PathNonExistent++;
                 } else {
                     // Check for file versus path mismatches.
@@ -2141,7 +2141,7 @@
                 endOffset = curOffset + curMatch.length;
                 let message: string;
                 if (!pathExists) {
-                    message = localize('cannot.find2', "Cannot find \"{0}\".", expandedPaths[0]);
+                    message = localize('cannot.find', "Cannot find: {0}", expandedPaths[0]);
                     newSquiggleMetrics.PathNonExistent++;
                     const diagnostic: vscode.Diagnostic = new vscode.Diagnostic(
                         new vscode.Range(document.positionAt(envTextStartOffSet + curOffset),
From dc8140199b1a1b0bc90c14e4b791ec40d78f8152 Mon Sep 17 00:00:00 2001
From: Luca <681992+lukka@users.noreply.github.com>
Date: Wed, 30 Oct 2024 15:32:31 -0700
Subject: [PATCH 11/13] Ensure only well known values are passed in to the LLM
 #2293227 (#12907)

Co-authored-by: Ben McMorran
---
 Extension/src/LanguageServer/lmTool.ts | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/Extension/src/LanguageServer/lmTool.ts b/Extension/src/LanguageServer/lmTool.ts
index ed5be61a00..6a39e4e91d 100644
--- a/Extension/src/LanguageServer/lmTool.ts
+++ b/Extension/src/LanguageServer/lmTool.ts
@@ -75,11 +75,29 @@ export class CppConfigurationLanguageModelTool implements vscode.LanguageModelTo
         for (const key in knownValues) {
             const knownKey = key as keyof ChatContextResult;
             if (knownValues[knownKey] && chatContext[knownKey]) {
-                chatContext[knownKey] = knownValues[knownKey][chatContext[knownKey]] || chatContext[knownKey];
+                // Clear the value if it's not in the known values.
+                chatContext[knownKey] = knownValues[knownKey][chatContext[knownKey]] || "";
             }
         }
 
-        return `The user is working on a ${chatContext.language} project. The project uses language version ${chatContext.standardVersion}, compiles using the ${chatContext.compiler} compiler, targets the ${chatContext.targetPlatform} platform, and targets the ${chatContext.targetArchitecture} architecture.`;
+        let contextString = "";
+        if (chatContext.language) {
+            contextString += `The user is working on a ${chatContext.language} project. `;
+        }
+        if (chatContext.standardVersion) {
+            contextString += `The project uses language version ${chatContext.standardVersion}. `;
+        }
+        if (chatContext.compiler) {
+            contextString += `The project compiles using the ${chatContext.compiler} compiler. `;
+        }
+        if (chatContext.targetPlatform) {
+            contextString += `The project targets the ${chatContext.targetPlatform} platform. `;
+        }
+        if (chatContext.targetArchitecture) {
+            contextString += `The project targets the ${chatContext.targetArchitecture} architecture. `;
+        }
+
+        return contextString;
     } catch {
         await this.reportError();
From fcd0b0c25f7933104a3d7ae01e69a872d5f5ab52 Mon Sep 17 00:00:00 2001
From: Ben McMorran
Date: Wed, 30 Oct 2024 17:02:51 -0700
Subject: [PATCH 12/13] Add telemetry for #cpp failures (#12910)

---
 Extension/src/LanguageServer/lmTool.ts | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/Extension/src/LanguageServer/lmTool.ts b/Extension/src/LanguageServer/lmTool.ts
index 6a39e4e91d..c3fad8b6eb 100644
--- a/Extension/src/LanguageServer/lmTool.ts
+++ b/Extension/src/LanguageServer/lmTool.ts
@@ -51,6 +51,7 @@ export class CppConfigurationLanguageModelTool implements vscode.LanguageModelTo
     }
 
     private async getContext(token: vscode.CancellationToken): Promise<string> {
+        const telemetryProperties: Record<string, string> = {};
         try {
             const currentDoc = vscode.window.activeTextEditor?.document;
             if (!currentDoc || (!util.isCpp(currentDoc) && !util.isHeaderFile(currentDoc.uri))) {
@@ -62,16 +63,6 @@ export class CppConfigurationLanguageModelTool implements vscode.LanguageModelTo
             return 'No configuration information is available for the active document.';
         }
 
-        telemetry.logLanguageModelToolEvent(
-            'cpp',
-            {
-                "language": chatContext.language,
-                "compiler": chatContext.compiler,
-                "standardVersion": chatContext.standardVersion,
-                "targetPlatform": chatContext.targetPlatform,
-                "targetArchitecture": chatContext.targetArchitecture
-            });
-
         for (const key in knownValues) {
             const knownKey = key as keyof ChatContextResult;
             if (knownValues[knownKey] && chatContext[knownKey]) {
@@ -83,25 +74,33 @@ export class CppConfigurationLanguageModelTool implements vscode.LanguageModelTo
         let contextString = "";
         if (chatContext.language) {
             contextString += `The user is working on a ${chatContext.language} project. `;
+            telemetryProperties["language"] = chatContext.language;
         }
         if (chatContext.standardVersion) {
             contextString += `The project uses language version ${chatContext.standardVersion}. `;
+            telemetryProperties["standardVersion"] = chatContext.standardVersion;
         }
         if (chatContext.compiler) {
             contextString += `The project compiles using the ${chatContext.compiler} compiler. `;
+            telemetryProperties["compiler"] = chatContext.compiler;
         }
         if (chatContext.targetPlatform) {
             contextString += `The project targets the ${chatContext.targetPlatform} platform. `;
+            telemetryProperties["targetPlatform"] = chatContext.targetPlatform;
         }
         if (chatContext.targetArchitecture) {
             contextString += `The project targets the ${chatContext.targetArchitecture} architecture. `;
+            telemetryProperties["targetArchitecture"] = chatContext.targetArchitecture;
         }
 
         return contextString;
     } catch {
         await this.reportError();
+        telemetryProperties["error"] = "true";
         return "";
+    } finally {
+        telemetry.logLanguageModelToolEvent('cpp', telemetryProperties);
     }
 }
From 390046ccf41b5a00ae5583c0a6d3805a2d8d8e05 Mon Sep 17 00:00:00 2001
From: Colen Garoutte-Carson <49173979+Colengms@users.noreply.github.com>
Date: Mon, 4 Nov 2024 18:08:15 -0800
Subject: [PATCH 13/13] Fix processing of bools and numbers in editorConfig
 files (#12923)

---
 Extension/src/LanguageServer/editorConfig.ts | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/Extension/src/LanguageServer/editorConfig.ts b/Extension/src/LanguageServer/editorConfig.ts
index 1ac8452b46..21a73673c6 100644
--- a/Extension/src/LanguageServer/editorConfig.ts
+++ b/Extension/src/LanguageServer/editorConfig.ts
@@ -92,7 +92,17 @@ function parseEditorConfigContent(content: string): Record<string, any> {
         const [key, ...values] = line.split('=');
         if (key && values.length > 0) {
             const trimmedKey = key.trim();
-            const value = values.join('=').trim();
+            let value: any = values.join('=').trim();
+
+            // Convert boolean-like and numeric values.
+            if (value.toLowerCase() === 'true') {
+                value = true;
+            } else if (value.toLowerCase() === 'false') {
+                value = false;
+            } else if (!isNaN(Number(value))) {
+                value = Number(value);
+            }
+
             if (currentSection) {
                 // Ensure the current section is initialized.
                 if (!config[currentSection]) {
@@ -114,7 +124,7 @@ function getEditorConfig(filePath: string): any {
     const rootDir: string = path.parse(currentDir).root;
 
     // Traverse from the file's directory to the root directory.
-    for (;;) {
+    for (; ;) {
         const editorConfigPath: string = path.join(currentDir, '.editorconfig');
         if (fs.existsSync(editorConfigPath)) {
            const configFileContent: string = fs.readFileSync(editorConfigPath, 'utf-8');
@@ -139,7 +149,7 @@ function getEditorConfig(filePath: string): any {
             });
 
             // Check if the current .editorconfig is the root.
-            if (configData['*']?.root?.toLowerCase() === 'true') {
+            if (configData['*']?.root) {
                 break;  // Stop searching after processing the root = true file.
             }
         }
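
As a reference for reviewers, the registration pattern this series converges on looks like the following. It is a minimal sketch written against the lmTools surface declared in the d.ts that patch 01 checks in (proposal version 10, using the `parameters`/`parametersSchema` names; the API finalized in stable VS Code 1.95 may use different member names). The tool id "example-lmtool-configQuery" and the ConfigQueryParams shape are invented for illustration; the real cpptools tool is 'cpptools-lmtool-configuration', registered in extension.ts and implemented in lmTool.ts.

import * as vscode from 'vscode';

// Illustrative parameter shape. A real tool also declares a matching JSON
// schema under the `languageModelTools` contribution point in package.json.
interface ConfigQueryParams {
    setting: string;
}

class ConfigQueryTool implements vscode.LanguageModelTool<ConfigQueryParams> {
    async invoke(
        options: vscode.LanguageModelToolInvocationOptions<ConfigQueryParams>,
        _token: vscode.CancellationToken
    ): Promise<vscode.LanguageModelToolResult> {
        // The returned text parts are what a caller forwards back to the model
        // in a User message carrying a LanguageModelToolResultPart.
        return new vscode.LanguageModelToolResult([
            new vscode.LanguageModelTextPart(`Queried setting: ${options.parameters.setting}`)
        ]);
    }

    prepareInvocation(
        options: vscode.LanguageModelToolInvocationPrepareOptions<ConfigQueryParams>,
        _token: vscode.CancellationToken
    ): vscode.PreparedToolInvocation {
        // Must be free of side effects; a call to prepareInvocation is not
        // necessarily followed by a call to invoke.
        return { invocationMessage: `Reading ${options.parameters.setting}...` };
    }
}

export function activate(context: vscode.ExtensionContext): void {
    // Mirrors how extension.ts gates registration on VS Code >= 1.95
    // (patch 06) before calling vscode.lm.registerTool.
    const [major, minor] = vscode.version.split('.').map(n => parseInt(n, 10));
    if (major > 1 || (major === 1 && minor >= 95)) {
        context.subscriptions.push(
            vscode.lm.registerTool('example-lmtool-configQuery', new ConfigQueryTool())
        );
    }
}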