Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 3 additions & 4 deletions src/extension/intents/node/toolCallingLoop.ts
Original file line number Diff line number Diff line change
Expand Up @@ -761,8 +761,8 @@ export abstract class ToolCallingLoop<TOptions extends IToolCallingLoopOptions =
span.setAttribute(GenAiAttr.REQUEST_MODEL, endpoint.model);
} catch { /* endpoint not available yet, will be set on response */ }

// Always capture user input message for the debug panel
{
// Capture user input message — gated by captureContent to prevent OTLP leakage
if (this._otelService.config.captureContent) {
const userMessage = this.turn.request.message;
span.setAttribute(GenAiAttr.INPUT_MESSAGES, truncateForOTel(JSON.stringify([
{ role: 'user', parts: [{ type: 'text', content: userMessage }] }
Expand Down Expand Up @@ -800,8 +800,7 @@ export abstract class ToolCallingLoop<TOptions extends IToolCallingLoopOptions =
[GenAiAttr.USAGE_OUTPUT_TOKENS]: totalOutputTokens,
...(lastResolvedModel ? { [GenAiAttr.RESPONSE_MODEL]: lastResolvedModel } : {}),
});
// Always capture agent output message and tool definitions for the debug panel
{
if (this._otelService.config.captureContent) {
const lastRound = result.toolCallRounds.at(-1);
if (lastRound?.response) {
const responseText = Array.isArray(lastRound.response) ? lastRound.response.join('') : lastRound.response;
Comment on lines +803 to 806
Copy link

Copilot AI Apr 2, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This new captureContent gate encloses setting OUTPUT_MESSAGES/TOOL_DEFINITIONS, but TOOL_DEFINITIONS is still serialized without truncateForOTel(). Tool schemas/descriptions can get large enough to exceed backend attribute limits and cause OTLP export/batch failures; elsewhere (e.g. emitInferenceDetailsEvent) content attributes are truncated. Consider truncating the serialized tool definitions too for consistency and exporter robustness when captureContent is enabled.

Copilot uses AI. Check for mistakes.
Expand Down
248 changes: 248 additions & 0 deletions src/extension/intents/test/node/toolCallingLoopContentGating.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,248 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/

import { Raw } from '@vscode/prompt-tsx';
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
import type { ChatRequest, LanguageModelChat, LanguageModelToolInformation } from 'vscode';
import { ChatFetchResponseType, ChatResponse } from '../../../../platform/chat/common/commonTypes';
import { toTextPart } from '../../../../platform/chat/common/globalStringUtils';
import { IEndpointProvider } from '../../../../platform/endpoint/common/endpointProvider';
import { IChatEndpoint, IEmbeddingsEndpoint } from '../../../../platform/networking/common/networking';
import { GenAiAttr, GenAiOperationName } from '../../../../platform/otel/common/genAiAttributes';
import { IOTelService } from '../../../../platform/otel/common/otelService';
import { CapturingOTelService } from '../../../../platform/otel/common/test/capturingOTelService';
import { CancellationTokenSource } from '../../../../util/vs/base/common/cancellation';
import { Event } from '../../../../util/vs/base/common/event';
import { DisposableStore } from '../../../../util/vs/base/common/lifecycle';
import { generateUuid } from '../../../../util/vs/base/common/uuid';
import { IInstantiationService } from '../../../../util/vs/platform/instantiation/common/instantiation';
import { Conversation, Turn } from '../../../prompt/common/conversation';
import { IBuildPromptContext } from '../../../prompt/common/intents';
import { IBuildPromptResult, nullRenderPromptResult } from '../../../prompt/node/intents';
import { createExtensionUnitTestingServices } from '../../../test/node/services';
import { IToolCallingLoopOptions, ToolCallingLoop } from '../../node/toolCallingLoop';

/**
 * Stub endpoint provider that hands back a single canned chat endpoint so the
 * loop under test can resolve a model without touching the network.
 */
class MockEndpointProvider implements IEndpointProvider {
	declare readonly _serviceBrand: undefined;
	readonly onDidModelsRefresh = Event.None;

	async getAllCompletionModels() { return []; }
	async getAllChatEndpoints() { return [this._buildStubEndpoint()]; }
	async getChatEndpoint() { return this._buildStubEndpoint(); }
	async getEmbeddingsEndpoint(): Promise<IEmbeddingsEndpoint> { throw new Error('Not implemented'); }

	// Builds a fixed-value fake endpoint; the tokenizer stub returns canned counts.
	private _buildStubEndpoint(): IChatEndpoint {
		const endpoint = {
			model: 'gpt-4o',
			modelProvider: 'github',
			family: 'gpt-4o',
			name: 'test-endpoint',
			version: '1.0',
			maxOutputTokens: 4096,
			modelMaxPromptTokens: 128000,
			supportsToolCalls: true,
			supportsVision: false,
			supportsPrediction: false,
			showInModelPicker: true,
			isDefault: true,
			isFallback: false,
			policy: 'enabled' as const,
			urlOrRequestMetadata: 'mock://endpoint',
			tokenizer: 'cl100k_base',
			// A fresh tokenizer stub per acquire call, mirroring real acquire semantics.
			acquireTokenizer: () => ({
				countMessagesTokens: async () => 100,
				countMessageTokens: async () => 10,
				countToolTokens: async () => 50,
				encode: () => [],
				free: () => { },
			}),
		};
		return endpoint as unknown as IChatEndpoint;
	}
}

/**
* Integration tests that exercise the real ToolCallingLoop code path to verify
* content attributes are properly gated behind captureContent.
*
* Unlike the original contentGating.spec.ts (which duplicated the if-check inline),
* these tests call through ToolCallingLoop.run() so they will fail if the
* captureContent guards are removed from the production code.
*/

class ContentGatingTestToolCallingLoop extends ToolCallingLoop<IToolCallingLoopOptions> {
protected override async buildPrompt(_buildPromptContext: IBuildPromptContext): Promise<IBuildPromptResult> {
return {
...nullRenderPromptResult(),
messages: [{ role: Raw.ChatRole.User, content: [toTextPart('fix my code')] }],
};
}

protected override async getAvailableTools(): Promise<LanguageModelToolInformation[]> {
return [
{ name: 'readFile', description: 'Read a file from the workspace', inputSchema: {}, tags: [], source: undefined },
{ name: 'writeFile', description: 'Write content to a file', inputSchema: {}, tags: [], source: undefined },
];
}

protected override async fetch(): Promise<ChatResponse> {
return {
type: ChatFetchResponseType.Success,
value: 'Here is the fix for your code.',
requestId: 'req-123',
serverRequestId: undefined,
usage: {
prompt_tokens: 50,
completion_tokens: 10,
total_tokens: 60,
},
resolvedModel: 'gpt-4o',
};
}
}

// Shared chat location for all mock requests — presumably 1 === panel chat; confirm against the vscode enum.
const chatPanelLocation: ChatRequest['location'] = 1;

/**
 * Produces a minimal ChatRequest suitable for driving the loop in tests.
 * Any field can be replaced via `overrides`.
 */
function createMockChatRequest(overrides: Partial<ChatRequest> = {}): ChatRequest {
	const defaults = {
		prompt: 'fix my code',
		command: undefined,
		references: [],
		location: chatPanelLocation,
		location2: undefined,
		attempt: 0,
		enableCommandDetection: false,
		isParticipantDetected: false,
		toolReferences: [],
		toolInvocationToken: {} as ChatRequest['toolInvocationToken'],
		model: { family: 'test' } as LanguageModelChat,
		tools: new Map(),
		id: generateUuid(),
		sessionId: generateUuid(),
		sessionResource: {} as ChatRequest['sessionResource'],
		hasHooksEnabled: false,
	};
	// Overrides win over defaults; `satisfies` keeps the shape checked against ChatRequest.
	return { ...defaults, ...overrides } satisfies ChatRequest;
}

/** Wraps a single user turn carrying `prompt` into a fresh Conversation. */
function createConversation(prompt: string): Conversation {
	const userTurn = new Turn(generateUuid(), { type: 'user', message: prompt });
	return new Conversation(generateUuid(), [userTurn]);
}

describe('ToolCallingLoop content gating (integration)', () => {
	let disposables: DisposableStore;
	let tokenSource: CancellationTokenSource;

	beforeEach(() => {
		disposables = new DisposableStore();
		tokenSource = new CancellationTokenSource();
		disposables.add(tokenSource);
	});

	afterEach(() => {
		disposables.dispose();
	});

	/**
	 * Wires a ContentGatingTestToolCallingLoop to a CapturingOTelService with the
	 * given captureContent flag, on top of the unit-testing service collection.
	 */
	function createLoopWithOTel(captureContent: boolean) {
		const otel = new CapturingOTelService({ captureContent });
		const serviceCollection = disposables.add(createExtensionUnitTestingServices());
		serviceCollection.define(IOTelService, otel);
		serviceCollection.define(IEndpointProvider, new MockEndpointProvider());
		const accessor = serviceCollection.createTestingAccessor();
		disposables.add(accessor);
		const instantiationService = accessor.get(IInstantiationService);

		const request = createMockChatRequest();
		const loop = instantiationService.createInstance(
			ContentGatingTestToolCallingLoop,
			{
				conversation: createConversation(request.prompt),
				toolCallLimit: 1,
				request,
			},
		);
		disposables.add(loop);
		return { otel, loop };
	}

	/**
	 * Runs a loop end-to-end with the given captureContent setting and returns
	 * the resulting invoke_agent span (asserting one was recorded). Factors out
	 * the run/find/assert boilerplate previously duplicated in every test below.
	 */
	async function runAndGetAgentSpan(captureContent: boolean) {
		const { otel, loop } = createLoopWithOTel(captureContent);
		await loop.run(undefined, tokenSource.token);
		const agentSpan = otel.findSpans('invoke_agent')[0];
		expect(agentSpan).toBeDefined();
		return agentSpan;
	}

	it('does NOT set content attributes on agent span when captureContent is false', async () => {
		const agentSpan = await runAndGetAgentSpan(false);

		// Content attributes must NOT be set
		expect(agentSpan.attributes[GenAiAttr.INPUT_MESSAGES]).toBeUndefined();
		expect(agentSpan.attributes[GenAiAttr.OUTPUT_MESSAGES]).toBeUndefined();
		expect(agentSpan.attributes[GenAiAttr.TOOL_DEFINITIONS]).toBeUndefined();
		expect(agentSpan.events.filter(e => e.name === 'user_message')).toHaveLength(0);

		// Non-content attributes should still be set
		expect(agentSpan.attributes[GenAiAttr.AGENT_NAME]).toBeDefined();
		expect(agentSpan.attributes[GenAiAttr.OPERATION_NAME]).toBe(GenAiOperationName.INVOKE_AGENT);
	});

	it('sets content attributes on agent span when captureContent is true', async () => {
		const agentSpan = await runAndGetAgentSpan(true);

		// Content attributes must be set
		expect(agentSpan.attributes[GenAiAttr.INPUT_MESSAGES]).toBeDefined();
		expect(agentSpan.attributes[GenAiAttr.OUTPUT_MESSAGES]).toBeDefined();
		expect(agentSpan.attributes[GenAiAttr.TOOL_DEFINITIONS]).toBeDefined();

		// user_message event should be emitted
		const userMessageEvents = agentSpan.events.filter(e => e.name === 'user_message');
		expect(userMessageEvents).toHaveLength(1);
	});

	it('INPUT_MESSAGES contains the user prompt when captureContent is true', async () => {
		const agentSpan = await runAndGetAgentSpan(true);

		const inputMessages = agentSpan.attributes[GenAiAttr.INPUT_MESSAGES] as string;
		expect(inputMessages).toContain('fix my code');
	});

	it('OUTPUT_MESSAGES contains the response text when captureContent is true', async () => {
		const agentSpan = await runAndGetAgentSpan(true);

		const outputMessages = agentSpan.attributes[GenAiAttr.OUTPUT_MESSAGES] as string;
		expect(outputMessages).toContain('Here is the fix for your code.');
	});

	it('TOOL_DEFINITIONS contains the available tools when captureContent is true', async () => {
		const agentSpan = await runAndGetAgentSpan(true);

		const toolDefs = agentSpan.attributes[GenAiAttr.TOOL_DEFINITIONS] as string;
		expect(toolDefs).toContain('readFile');
		expect(toolDefs).toContain('writeFile');
	});

	it('user_message event content matches user prompt when captureContent is true', async () => {
		const agentSpan = await runAndGetAgentSpan(true);

		const userMessageEvent = agentSpan.events.find(e => e.name === 'user_message');
		expect(userMessageEvent?.attributes?.content).toBe('fix my code');
	});
});
12 changes: 6 additions & 6 deletions src/extension/prompt/node/chatMLFetcher.ts
Original file line number Diff line number Diff line change
Expand Up @@ -243,8 +243,8 @@ export class ChatMLFetcherImpl extends AbstractChatMLFetcher {
// Tag span with debug name so orphaned spans (title, progressMessages, etc.) are identifiable
otelInferenceSpan?.setAttribute(GenAiAttr.AGENT_NAME, debugName);

// Extract and set structured prompt sections for the debug panel
if (otelInferenceSpan) {
// Extract and set structured prompt sections — gated by captureContent to prevent OTLP leakage
if (otelInferenceSpan && this._otelService.config.captureContent) {
// Support both Chat Completions API (messages) and Responses API (input) formats
const capiMessages = (requestBody.messages ?? requestBody.input) as ReadonlyArray<{ role?: string; content?: string | unknown[] }> | undefined;
// User request: last user-role message
Expand Down Expand Up @@ -278,8 +278,8 @@ export class ChatMLFetcherImpl extends AbstractChatMLFetcher {
}
}

// Always capture full request content for the debug panel
if (otelInferenceSpan) {
// Capture full request content — gated by captureContent to prevent OTLP leakage
if (otelInferenceSpan && this._otelService.config.captureContent) {
const capiMessages = (requestBody.messages ?? requestBody.input) as ReadonlyArray<{ role?: string; content?: string | unknown[] }> | undefined;
if (capiMessages) {
// Normalize non-string content (Anthropic arrays, Responses API parts) to strings for OTel schema
Comment on lines +281 to 285
Copy link

Copilot AI Apr 2, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

GenAiAttr.INPUT_MESSAGES is now gated on captureContent, but this block still sets other content-bearing attributes (notably CopilotChatAttr.USER_REQUEST and GenAiAttr.SYSTEM_INSTRUCTIONS) regardless of captureContent. SYSTEM_INSTRUCTIONS is explicitly classified as opt-in content in genAiAttributes.ts, and genAiEvents.ts only emits it when otel.config.captureContent is true. To avoid continued prompt leakage when captureContent=false (and to match the documented single-flag behavior), gate these attributes (and any other prompt/response content attributes set on this span) behind this._otelService.config.captureContent as well.

Copilot uses AI. Check for mistakes.
Expand Down Expand Up @@ -391,8 +391,8 @@ export class ChatMLFetcherImpl extends AbstractChatMLFetcher {
: {}),
});
}
// Always capture response content for the debug panel
if (otelInferenceSpan && result.type === ChatFetchResponseType.Success) {
// Capture response content — gated by captureContent to prevent OTLP leakage
if (otelInferenceSpan && this._otelService.config.captureContent && result.type === ChatFetchResponseType.Success) {
const responseText = streamRecorder.deltas.map(d => d.text).join('');
const toolCalls = streamRecorder.deltas
.filter(d => d.copilotToolCalls?.length)
Expand Down
30 changes: 30 additions & 0 deletions src/platform/otel/common/test/contentGating.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/

import { describe, expect, it } from 'vitest';
import { CapturingOTelService } from './capturingOTelService';

/**
* CapturingOTelService correctly exposes the captureContent config flag.
*
* Integration tests that exercise the real ToolCallingLoop code path live in
* src/extension/intents/test/node/toolCallingLoopContentGating.spec.ts.
*/
describe('CapturingOTelService captureContent config', () => {
	it('defaults captureContent to false when OTEL is enabled without explicit flag', () => {
		const service = new CapturingOTelService();
		expect(service.config.captureContent).toBe(false);
	});

	// An explicit override must win in either direction; cover both values.
	for (const flag of [true, false]) {
		it(`respects captureContent=${flag} override`, () => {
			const service = new CapturingOTelService({ captureContent: flag });
			expect(service.config.captureContent).toBe(flag);
		});
	}
});