7 changes: 7 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions src/copilot-wrapper/inline_chat.ts
@@ -108,7 +108,7 @@ class InlineChat {

if (!userQuestion) return;

// Get AI response
// Get AI response using enhanced chat history
const aiResponse = await getAIResponseWithHistory(
userQuestion,
"inline_chat",
@@ -188,7 +188,7 @@ class InlineChat {
// Get file content
const fileContent = editor.document.getText();

// Get AI response with context
// Get AI response with context using enhanced chat history
const aiResponse = await getAIResponseWithHistory(
transcription,
"inline_chat",
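Note: a minimal sketch of the call shape used above. The full parameter list of `getAIResponseWithHistory` is elided in this hunk, so the options object below is an assumption based on the fields the function body reads (`customPrompt`, `fileContext`) in `language_model.ts` below.

```ts
import * as vscode from "vscode";
import { getAIResponseWithHistory } from "../services/language_model";

// Hypothetical helper mirroring the inline_chat.ts call sites above.
// The options shape is assumed from the options.customPrompt /
// options.fileContext reads shown in language_model.ts.
async function askInlineChat(
  editor: vscode.TextEditor,
  question: string
): Promise<string> {
  return getAIResponseWithHistory(question, "inline_chat", {
    fileContext: editor.document.getText(), // current file as context
  });
}
```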
102 changes: 32 additions & 70 deletions src/services/language_model.ts
@@ -1,6 +1,7 @@
import { renderPrompt } from "@vscode/prompt-tsx";
import * as vscode from "vscode";
import { ChatHistoryManager } from "../utils/chat_history_manager";
import { ChatHistoryPrompt } from "../utils/history";

const BASE_PROMPT = `You are Cheerleader, an enthusiastic and supportive coding companion!
Your role is to help developers write better code while keeping their spirits high with positive energy.
@@ -168,15 +169,7 @@ export async function getAIResponseWithTools(
* @param documentUri - The URI of the document to which the conversation belongs.
* @param options - Additional options for the language model.
* @returns The AI's response to the user's message.
*
* The order we construct the messages is important:
* 1. We start with the base prompt.
* 2. We add the custom prompt if provided.
* 3. We add the user message if provided.
* 4. We add the conversation history.
* 5. We add the file context if provided.
*
* This can be improved using prompt-tsx in the future...
* @note The new implementation uses Prompt-TSX to directly render and prioritize the prompt.
*/
export async function getAIResponseWithHistory(
userText: string | null = null,
@@ -193,61 +186,28 @@ export async function getAIResponseWithHistory(
family: family,
});

if (!model) {
throw new Error("No language model available");
}

// const { messages } = await renderPrompt(
// BASE_PROMPT,
// { userQuery: userText },
// { modelMaxPromptTokens: 4096 },
// model
// )

// Get chat history and ensure we're in the right mode
const historyManager = ChatHistoryManager.getInstance();
historyManager.switchMode(mode);
const history = historyManager.getHistory();

// Prepare messages starting with base prompt
const messages = [vscode.LanguageModelChatMessage.User(BASE_PROMPT)];

// Add custom prompt if provided
if (options.customPrompt) {
messages.push(vscode.LanguageModelChatMessage.User(options.customPrompt));
}

// Add current user message if provided
if (userText) {
messages.push(vscode.LanguageModelChatMessage.User(userText));
}

messages.push(
vscode.LanguageModelChatMessage.User(
"The fowlowing is the conversation history:"
)
);

// Add conversation history
for (const turn of history) {
if (turn.role === "user") {
messages.push(vscode.LanguageModelChatMessage.User(turn.content));
} else if (turn.role === "assistant") {
messages.push(vscode.LanguageModelChatMessage.Assistant(turn.content));
} else if (turn.role === "system") {
// System messages are treated as user messages since VS Code API doesn't have System type
messages.push(vscode.LanguageModelChatMessage.User(turn.content));
}
}

// Add file context if provided (to the end of the messages)
if (options.fileContext) {
messages.push(
vscode.LanguageModelChatMessage.User(
`File context: ${options.fileContext}`
)
);
}
if (!model) {
throw new Error("No language model available");
}

// Get chat history and ensure we're in the right mode
const historyManager = ChatHistoryManager.getInstance();
historyManager.switchMode(mode);

// Render the prompt using VSCode's Prompt TSX
const { messages } = await renderPrompt(
ChatHistoryPrompt,
{
history: historyManager.getHistory(),
baseInstructions: BASE_PROMPT,
userQuery: userText || undefined,
fileContext: options.fileContext ? [options.fileContext] : undefined,
customInstructions: options.customPrompt,
mode: mode
},
{ modelMaxPromptTokens: 4096 },
model
);

// Get response from the model
const chatResponse = await model.sendRequest(
@@ -262,19 +222,21 @@ export async function getAIResponseWithHistory(
fullResponse += fragment;
}

// Add the conversation turns to history
// Add the conversation turns to history before returning
if (userText) {
historyManager.addTurn({
role: "user",
content: userText,
timestamp: new Date(),
});
}
historyManager.addTurn({
role: "assistant",
content: fullResponse,
timestamp: new Date(),
});
if (fullResponse) {
historyManager.addTurn({
role: "assistant",
content: fullResponse,
timestamp: new Date(),
});
}

return fullResponse;
} catch (error) {
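Note: `ChatHistoryPrompt` is imported from `src/utils/history`, which is not part of this diff. Purely as a hedged sketch (the real element may differ), here is one way such an element could map the manager's history onto prioritized prompt-tsx messages, matching the props passed from `getAIResponseWithHistory` above. The `ChatTurn` import is an assumption about what `chat_history_manager.ts` exports.

```tsx
import {
  AssistantMessage,
  BasePromptElementProps,
  PrioritizedList,
  PromptElement,
  SystemMessage,
  UserMessage,
} from "@vscode/prompt-tsx";
// Assumed export; the diff only shows ChatTurn used inside the manager.
import { ChatTurn } from "./chat_history_manager";

interface IChatHistoryPromptProps extends BasePromptElementProps {
  history: ChatTurn[];
  baseInstructions: string;
  userQuery?: string;
  fileContext?: string[];
  customInstructions?: string;
  mode: string; // accepted to match the call site; unused in this sketch
}

export class ChatHistoryPrompt extends PromptElement<IChatHistoryPromptProps> {
  render() {
    return (
      <>
        {/* Base instructions: highest priority, pruned last */}
        <SystemMessage priority={100}>
          {this.props.baseInstructions}
        </SystemMessage>
        {this.props.customInstructions && (
          <UserMessage priority={95}>
            {this.props.customInstructions}
          </UserMessage>
        )}
        {/* Oldest turns are pruned first when the token budget is tight */}
        <PrioritizedList priority={60} descending={false}>
          {this.props.history.map((turn) =>
            turn.role === "assistant" ? (
              <AssistantMessage>{turn.content}</AssistantMessage>
            ) : (
              <UserMessage>{turn.content}</UserMessage>
            )
          )}
        </PrioritizedList>
        {this.props.fileContext && (
          <UserMessage priority={85}>
            File context: {this.props.fileContext.join("\n\n")}
          </UserMessage>
        )}
        {this.props.userQuery && (
          <UserMessage priority={90}>{this.props.userQuery}</UserMessage>
        )}
      </>
    );
  }
}
```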
155 changes: 155 additions & 0 deletions src/utils/chat_history_example.tsx
@@ -0,0 +1,155 @@
import {
AssistantMessage,
BasePromptElementProps,
PrioritizedList,
PromptElement,
PromptPiece,
PromptSizing,
SystemMessage,
UserMessage,
} from "@vscode/prompt-tsx";
import {
CancellationToken,
ChatContext,
ChatRequestTurn,
ChatResponseMarkdownPart,
ChatResponseTurn,
Progress,
} from "vscode";

interface IMyPromptProps extends BasePromptElementProps {
history: ChatContext["history"];
userQuery: string;
fileContext?: string[]; // Add support for file context
}

/**
* Including conversation history in your prompt is important as it allows the
user to ask follow-up questions to previous messages. However, you want to
make sure its priority is treated appropriately because history can
grow very large over time. We follow the priority pattern suggested by VS Code:
*
* 1. The base prompt instructions, then
* 2. The current user query, then
* 3. The last couple turns of chat history, then
* 4. Any supporting data, then
* 5. As much of the remaining history as you can fit.
*
For this reason, we split the history into two parts in the prompt, where
* recent prompt turns are prioritized above general contextual information.
*/
export class MyPrompt extends PromptElement<IMyPromptProps> {
render() {
return (
<>
<SystemMessage priority={100}>
Here are your base instructions. They have the highest priority
because you want to make sure they're always included!
</SystemMessage>
{/* The remainder of the history has the lowest priority since it's less relevant */}
<HistoryMessages
history={this.props.history.slice(0, -2)}
priority={0}
/>
{/* The last 2 history messages are preferred over any workspace context */}
<HistoryMessages history={this.props.history.slice(-2)} priority={80} />
{/* File context gets medium-high priority */}
{this.props.fileContext && (
<UserMessage priority={85}>
Here are the relevant files for context:
{this.props.fileContext.map((file) => `\n\n${file}`)}
</UserMessage>
)}
{/* The user query is right behind the system message in priority */}
<UserMessage priority={90}>{this.props.userQuery}</UserMessage>
<UserMessage priority={70}>
With a slightly lower priority, you can include some contextual data
about the workspace or files here...
</UserMessage>
</>
);
}
}

interface IHistoryProps extends BasePromptElementProps {
history: ChatContext["history"];
newer: number; // last 2 message priority values
older: number; // previous message priority values
passPriority: true; // require this prop be set!
}

/**
We can wrap this history element up to make it a little easier to use. `prompt-tsx`
* has a `passPriority` attribute which allows an element to act as a 'pass-through'
* container, so that its children are pruned as if they were direct children of
* the parent. With this component, the elements
*
* ```
* <HistoryMessages history={this.props.history.slice(0, -2)} priority={0} />
* <HistoryMessages history={this.props.history.slice(-2)} priority={80} />
* ```
*
* ...can equivalently be expressed as:
*
* ```
* <History history={this.props.history} passPriority older={0} recentPriority={80} />
* ```
*/
export class History extends PromptElement<IHistoryProps> {
render(): PromptPiece {
return (
<>
<HistoryMessages
history={this.props.history.slice(0, -2)}
priority={this.props.older}
/>
<HistoryMessages
history={this.props.history.slice(-2)}
priority={this.props.newer}
/>
</>
);
}
}

interface IHistoryMessagesProps extends BasePromptElementProps {
history: ChatContext["history"];
}

/**
The HistoryMessages element simply lists user and assistant messages from the
chat context. If things like tool calls or file trees are relevant for your
use case, you can make this element more complex to handle those cases.
*/
export class HistoryMessages extends PromptElement<IHistoryMessagesProps> {
render(): PromptPiece {
const history: (UserMessage | AssistantMessage)[] = [];
for (const turn of this.props.history) {
if (turn instanceof ChatRequestTurn) {
history.push(<UserMessage>{turn.prompt}</UserMessage>);
} else if (turn instanceof ChatResponseTurn) {
history.push(
<AssistantMessage name={turn.participant}>
{chatResponseToMarkdown(turn)}
</AssistantMessage>
);
}
}
return (
<PrioritizedList priority={0} descending={false}>
{history}
</PrioritizedList>
);
}
}

const chatResponseToMarkdown = (response: ChatResponseTurn) => {
let str = "";
for (const part of response.response) {
if (part instanceof ChatResponseMarkdownPart) {
str += part.value;
}
}

return str;
};
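Note: a short usage sketch tying the example together — rendering `MyPrompt` inside a chat participant handler, using the same `renderPrompt` call shape as `language_model.ts` above. The `gpt-4o` model family and the handler wiring are illustrative assumptions, not part of this PR.

```ts
import { renderPrompt } from "@vscode/prompt-tsx";
import * as vscode from "vscode";
import { MyPrompt } from "./chat_history_example";

// Sketch: request/context/stream/token come from the standard
// vscode.ChatRequestHandler signature.
const handler: vscode.ChatRequestHandler = async (
  request,
  context,
  stream,
  token
) => {
  // Model family is an assumption for illustration.
  const [model] = await vscode.lm.selectChatModels({ family: "gpt-4o" });
  const { messages } = await renderPrompt(
    MyPrompt,
    { history: context.history, userQuery: request.prompt },
    { modelMaxPromptTokens: 4096 },
    model
  );
  const response = await model.sendRequest(messages, {}, token);
  for await (const fragment of response.text) {
    stream.markdown(fragment); // stream the reply back into the chat view
  }
};
```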
7 changes: 7 additions & 0 deletions src/utils/chat_history_manager.ts
@@ -20,6 +20,13 @@ export class ChatHistoryManager {
private maxTurns: number;
private history: ChatTurn[] = [];
private currentMode: string = '';

/**
* Get the current conversation mode
*/
public getCurrentMode(): string {
return this.currentMode;
}

private constructor(maxTurns = 10) {
this.maxTurns = maxTurns;
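Note: a brief sketch of how the new accessor might be used at a call site. Everything here except the guard itself already appears elsewhere in this PR (`getInstance`, `switchMode`, `addTurn`); the guard is a hypothetical use.

```ts
import { ChatHistoryManager } from "./chat_history_manager";

// Hypothetical guard: skip switchMode when we are already in the
// requested mode, using the new getCurrentMode() accessor.
const manager = ChatHistoryManager.getInstance();
if (manager.getCurrentMode() !== "inline_chat") {
  manager.switchMode("inline_chat");
}
manager.addTurn({
  role: "user",
  content: "Explain the selected function",
  timestamp: new Date(),
});
```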