Skip to content

Commit 3202ec1

Browse files
committed
feat: use latest @copilot-extensions/preview-sdk and all its goodies
1 parent 95e1121 commit 3202ec1

7 files changed

+49
-72
lines changed

src/functions.ts

+7-6
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
1-
import OpenAI from "openai";
1+
import type { PromptFunction, InteropMessage } from "@copilot-extensions/preview-sdk";
2+
23
import { ModelsAPI } from "./models-api.js";
34

45
// defaultModel is the model used for internal calls - for tool calling,
@@ -8,26 +9,26 @@ export const defaultModel = "gpt-4o-mini";
89
// RunnerResponse is the response from a function call.
910
export interface RunnerResponse {
1011
model: string;
11-
messages: OpenAI.ChatCompletionMessageParam[];
12+
messages: InteropMessage[];
1213
}
1314

1415
export abstract class Tool {
1516
modelsAPI: ModelsAPI;
16-
static definition: OpenAI.FunctionDefinition;
17+
static definition: PromptFunction["function"];
1718

1819
constructor(modelsAPI: ModelsAPI) {
1920
this.modelsAPI = modelsAPI;
2021
}
2122

22-
static get tool(): OpenAI.Chat.Completions.ChatCompletionTool {
23+
static get tool(): PromptFunction {
2324
return {
2425
type: "function",
2526
function: this.definition,
2627
};
2728
}
2829

2930
abstract execute(
30-
messages: OpenAI.ChatCompletionMessageParam[],
31-
args: object
31+
messages: InteropMessage[],
32+
args: Record<string, unknown>
3233
): Promise<RunnerResponse>;
3334
}

src/functions/describe-model.ts

+3-2
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
1-
import OpenAI from "openai";
1+
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2+
23
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
34

45
export class describeModel extends Tool {
@@ -19,7 +20,7 @@ export class describeModel extends Tool {
1920
};
2021

2122
async execute(
22-
messages: OpenAI.ChatCompletionMessageParam[],
23+
messages: InteropMessage[],
2324
args: { model: string }
2425
): Promise<RunnerResponse> {
2526
const [model, modelSchema] = await Promise.all([

src/functions/execute-model.ts

+3-2
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,8 @@
1-
import OpenAI from "openai";
1+
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2+
23
import { RunnerResponse, Tool } from "../functions.js";
34

4-
type MessageWithReferences = OpenAI.ChatCompletionMessageParam & {
5+
type MessageWithReferences = InteropMessage & {
56
copilot_references: Reference[];
67
};
78

src/functions/list-models.ts

+3-2
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
1-
import OpenAI from "openai";
1+
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2+
23
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
34

45
export class listModels extends Tool {
@@ -15,7 +16,7 @@ export class listModels extends Tool {
1516
};
1617

1718
async execute(
18-
messages: OpenAI.ChatCompletionMessageParam[]
19+
messages: InteropMessage[]
1920
): Promise<RunnerResponse> {
2021
const models = await this.modelsAPI.listModels();
2122

src/functions/recommend-model.ts

+3-2
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
1-
import OpenAI from "openai";
1+
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2+
23
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
34

45
export class recommendModel extends Tool {
@@ -15,7 +16,7 @@ export class recommendModel extends Tool {
1516
};
1617

1718
async execute(
18-
messages: OpenAI.ChatCompletionMessageParam[]
19+
messages: InteropMessage[]
1920
): Promise<RunnerResponse> {
2021
const models = await this.modelsAPI.listModels();
2122

src/index.ts

+30-48
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,6 @@
1-
import { createServer, IncomingMessage } from "node:http";
1+
import { createServer } from "node:http";
22

3-
import { verifyAndParseRequest, createAckEvent } from "@copilot-extensions/preview-sdk";
4-
import OpenAI from "openai";
3+
import { prompt, getFunctionCalls, createAckEvent, createDoneEvent, verifyAndParseRequest, createTextEvent } from "@copilot-extensions/preview-sdk";
54

65
import { describeModel } from "./functions/describe-model.js";
76
import { executeModel } from "./functions/execute-model.js";
@@ -12,6 +11,7 @@ import { ModelsAPI } from "./models-api.js";
1211

1312
const server = createServer(async (request, response) => {
1413
if (request.method === "GET") {
14+
// health check
1515
response.statusCode = 200;
1616
response.end(`OK`);
1717
return;
@@ -55,15 +55,9 @@ const server = createServer(async (request, response) => {
5555
response.write(createAckEvent().toString());
5656

5757
// List of functions that are available to be called
58-
const modelsAPI = new ModelsAPI(apiKey);
58+
const modelsAPI = new ModelsAPI();
5959
const functions = [listModels, describeModel, executeModel, recommendModel];
6060

61-
// Use the Copilot API to determine which function to execute
62-
const capiClient = new OpenAI({
63-
baseURL: "https://api.githubcopilot.com",
64-
apiKey,
65-
});
66-
6761
// Prepend a system message that includes the list of models, so that
6862
// tool calls can better select the right model to use.
6963
const models = await modelsAPI.listModels();
@@ -91,57 +85,48 @@ const server = createServer(async (request, response) => {
9185
].concat(payload.messages);
9286

9387
console.time("tool-call");
94-
const toolCaller = await capiClient.chat.completions.create({
95-
stream: false,
96-
model: "gpt-4",
97-
// @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
88+
const promptResult = await prompt({
9889
messages: toolCallMessages,
99-
tool_choice: "auto",
90+
token: apiKey,
10091
tools: functions.map((f) => f.tool),
101-
});
92+
})
10293
console.timeEnd("tool-call");
10394

95+
const [functionToCall] = getFunctionCalls(promptResult)
96+
10497
if (
105-
!toolCaller.choices[0] ||
106-
!toolCaller.choices[0].message ||
107-
!toolCaller.choices[0].message.tool_calls ||
108-
!toolCaller.choices[0].message.tool_calls[0].function
98+
!functionToCall
10999
) {
110100
console.log("No tool call found");
111-
// No tool to call, so just call the model with the original messages
112-
const stream = await capiClient.chat.completions.create({
113-
stream: true,
114-
model: "gpt-4",
115-
// @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
101+
102+
const { stream } = await prompt.stream({
116103
messages: payload.messages,
117-
});
104+
token: apiKey,
105+
})
118106

119107
for await (const chunk of stream) {
120-
const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
121-
response.write(chunkStr);
108+
response.write(new TextDecoder().decode(chunk));
122109
}
123-
response.write("data: [DONE]\n\n");
124-
response.end();
110+
111+
response.end(createDoneEvent().toString());
125112
return;
126113
}
127114

128-
const functionToCall = toolCaller.choices[0].message.tool_calls[0].function;
129-
const args = JSON.parse(functionToCall.arguments);
115+
const args = JSON.parse(functionToCall.function.arguments);
130116

131117
console.time("function-exec");
132118
let functionCallRes: RunnerResponse;
133119
try {
134-
console.log("Executing function", functionToCall.name);
120+
console.log("Executing function", functionToCall.function.name);
135121
const funcClass = functions.find(
136-
(f) => f.definition.name === functionToCall.name
122+
(f) => f.definition.name === functionToCall.function.name
137123
);
138124
if (!funcClass) {
139125
throw new Error("Unknown function");
140126
}
141127

142128
console.log("\t with args", args);
143129
const func = new funcClass(modelsAPI);
144-
// @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
145130
functionCallRes = await func.execute(payload.messages, args);
146131
} catch (err) {
147132
console.error(err);
@@ -152,23 +137,20 @@ const server = createServer(async (request, response) => {
152137
console.timeEnd("function-exec");
153138

154139
try {
155-
const stream = await modelsAPI.inference.chat.completions.create({
140+
console.time("streaming");
141+
const { stream } = await prompt.stream({
142+
endpoint: 'https://models.inference.ai.azure.com/chat/completions',
156143
model: functionCallRes.model,
157144
messages: functionCallRes.messages,
158-
stream: true,
159-
stream_options: {
160-
include_usage: false,
161-
},
162-
});
145+
token: apiKey,
146+
})
163147

164-
console.time("streaming");
165148
for await (const chunk of stream) {
166-
const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
167-
response.write(chunkStr);
149+
response.write(new TextDecoder().decode(chunk));
168150
}
169-
response.write("data: [DONE]\n\n");
151+
152+
response.end(createDoneEvent().toString());
170153
console.timeEnd("streaming");
171-
response.end();
172154
} catch (err) {
173155
console.error(err);
174156
response.statusCode = 500
@@ -180,12 +162,12 @@ const port = process.env.PORT || "3000"
180162
server.listen(port);
181163
console.log(`Server running at http://localhost:${port}`);
182164

183-
function getBody(request: IncomingMessage): Promise<string> {
165+
function getBody(request: any): Promise<string> {
184166
return new Promise((resolve) => {
185167
const bodyParts: any[] = [];
186168
let body;
187169
request
188-
.on("data", (chunk) => {
170+
.on("data", (chunk: Buffer) => {
189171
bodyParts.push(chunk);
190172
})
191173
.on("end", () => {

src/models-api.ts

-10
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,3 @@
1-
import OpenAI from "openai";
2-
31
// Model is the structure of a model in the model catalog.
42
export interface Model {
53
id: string;
@@ -33,16 +31,8 @@ export type ModelSchemaParameter = {
3331
};
3432

3533
export class ModelsAPI {
36-
inference: OpenAI;
3734
private _models: Model[] | null = null;
3835

39-
constructor(apiKey: string) {
40-
this.inference = new OpenAI({
41-
baseURL: "https://models.inference.ai.azure.com",
42-
apiKey,
43-
});
44-
}
45-
4636
async getModel(modelName: string): Promise<Model> {
4737
const modelRes = await fetch(
4838
"https://modelcatalog.azure-api.net/v1/model/" + modelName

0 commit comments

Comments (0)