Commit 24a0a06

feat: use latest @copilot-extensions/preview-sdk and all its goodies
1 parent 485c77a commit 24a0a06

7 files changed: +49 −71 lines

src/functions.ts (+7 −6)

@@ -1,4 +1,5 @@
-import OpenAI from "openai";
+import type { PromptFunction, InteropMessage } from "@copilot-extensions/preview-sdk";
+
 import { ModelsAPI } from "./models-api.js";
 
 // defaultModel is the model used for internal calls - for tool calling,
@@ -8,26 +9,26 @@ export const defaultModel = "gpt-4o-mini";
 // RunnerResponse is the response from a function call.
 export interface RunnerResponse {
   model: string;
-  messages: OpenAI.ChatCompletionMessageParam[];
+  messages: InteropMessage[];
 }
 
 export abstract class Tool {
   modelsAPI: ModelsAPI;
-  static definition: OpenAI.FunctionDefinition;
+  static definition: PromptFunction["function"];
 
   constructor(modelsAPI: ModelsAPI) {
     this.modelsAPI = modelsAPI;
   }
 
-  static get tool(): OpenAI.Chat.Completions.ChatCompletionTool {
+  static get tool(): PromptFunction {
     return {
       type: "function",
      function: this.definition,
     };
   }
 
   abstract execute(
-    messages: OpenAI.ChatCompletionMessageParam[],
-    args: object
+    messages: InteropMessage[],
+    args: Record<string, unknown>
   ): Promise<RunnerResponse>;
 }
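With the new types, a concrete tool is a small subclass. The sketch below is illustrative only (the echoMessage class and its definition are not part of this commit), assuming InteropMessage carries the usual role/content fields:

import type { InteropMessage, PromptFunction } from "@copilot-extensions/preview-sdk";

import { RunnerResponse, defaultModel, Tool } from "../functions.js";

// Hypothetical example tool: echoes the user's last message back.
export class echoMessage extends Tool {
  static definition: PromptFunction["function"] = {
    name: "echo_message",
    description: "Echoes the user's last message.",
    parameters: { type: "object", properties: {} },
  };

  async execute(
    messages: InteropMessage[],
    _args: Record<string, unknown>
  ): Promise<RunnerResponse> {
    const last = messages[messages.length - 1];
    return {
      model: defaultModel,
      messages: [
        ...messages,
        { role: "system", content: `Echo the user verbatim: ${last?.content ?? ""}` },
      ],
    };
  }
}

The static `tool` getter on Tool wraps `definition` into a PromptFunction, so `echoMessage.tool` could be passed straight into the `tools` array in src/index.ts.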

src/functions/describe-model.ts (+3 −2)

@@ -1,4 +1,5 @@
-import OpenAI from "openai";
+import type { InteropMessage } from "@copilot-extensions/preview-sdk";
+
 import { RunnerResponse, defaultModel, Tool } from "../functions.js";
 
 export class describeModel extends Tool {
@@ -19,7 +20,7 @@ export class describeModel extends Tool {
   };
 
   async execute(
-    messages: OpenAI.ChatCompletionMessageParam[],
+    messages: InteropMessage[],
     args: { model: string }
   ): Promise<RunnerResponse> {
     const [model, modelSchema] = await Promise.all([

src/functions/execute-model.ts (+3 −2)

@@ -1,7 +1,8 @@
-import OpenAI from "openai";
+import type { InteropMessage } from "@copilot-extensions/preview-sdk";
+
 import { RunnerResponse, Tool } from "../functions.js";
 
-type MessageWithReferences = OpenAI.ChatCompletionMessageParam & {
+type MessageWithReferences = InteropMessage & {
   copilot_references: Reference[];
 };
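For orientation, this tool reads Copilot's attached context off the incoming message. A minimal sketch of that access pattern, assuming the last payload message follows the MessageWithReferences shape (illustrative only; the Reference type is defined elsewhere in execute-model.ts):

// Sketch: pull the attached references off the most recent message.
const lastMessage = messages[messages.length - 1] as MessageWithReferences;
const references = lastMessage.copilot_references ?? [];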

src/functions/list-models.ts (+3 −2)

@@ -1,4 +1,5 @@
-import OpenAI from "openai";
+import type { InteropMessage } from "@copilot-extensions/preview-sdk";
+
 import { RunnerResponse, defaultModel, Tool } from "../functions.js";
 
 export class listModels extends Tool {
@@ -15,7 +16,7 @@ export class listModels extends Tool {
   };
 
   async execute(
-    messages: OpenAI.ChatCompletionMessageParam[]
+    messages: InteropMessage[]
   ): Promise<RunnerResponse> {
     const models = await this.modelsAPI.listModels();
src/functions/recommend-model.ts

+3-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
1-
import OpenAI from "openai";
1+
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2+
23
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
34

45
export class recommendModel extends Tool {
@@ -15,7 +16,7 @@ export class recommendModel extends Tool {
1516
};
1617

1718
async execute(
18-
messages: OpenAI.ChatCompletionMessageParam[]
19+
messages: InteropMessage[]
1920
): Promise<RunnerResponse> {
2021
const models = await this.modelsAPI.listModels();
2122

src/index.ts (+30 −47)

@@ -1,7 +1,6 @@
-import { createServer, IncomingMessage } from "node:http";
+import { createServer } from "node:http";
 
-import { verifyAndParseRequest, createAckEvent } from "@copilot-extensions/preview-sdk";
-import OpenAI from "openai";
+import { prompt, getFunctionCalls, createAckEvent, createDoneEvent, verifyAndParseRequest, createTextEvent } from "@copilot-extensions/preview-sdk";
 
 import { describeModel } from "./functions/describe-model.js";
 import { executeModel } from "./functions/execute-model.js";
@@ -12,6 +11,7 @@ import { ModelsAPI } from "./models-api.js";
 
 const server = createServer(async (request, response) => {
   if (request.method === "GET") {
+    // health check
     response.statusCode = 200;
     response.end(`OK`);
     return;
@@ -54,15 +54,9 @@ const server = createServer(async (request, response) => {
   response.write(createAckEvent().toString());
 
   // List of functions that are available to be called
-  const modelsAPI = new ModelsAPI(apiKey);
+  const modelsAPI = new ModelsAPI();
   const functions = [listModels, describeModel, executeModel, recommendModel];
 
-  // Use the Copilot API to determine which function to execute
-  const capiClient = new OpenAI({
-    baseURL: "https://api.githubcopilot.com",
-    apiKey,
-  });
-
   // Prepend a system message that includes the list of models, so that
   // tool calls can better select the right model to use.
   const models = await modelsAPI.listModels();
@@ -90,56 +84,48 @@ const server = createServer(async (request, response) => {
   ].concat(payload.messages);
 
   console.time("tool-call");
-  const toolCaller = await capiClient.chat.completions.create({
-    stream: false,
-    model: "gpt-4",
+  const promptResult = await prompt({
     messages: toolCallMessages,
-    tool_choice: "auto",
+    token: apiKey,
     tools: functions.map((f) => f.tool),
-  });
+  })
   console.timeEnd("tool-call");
 
+  const [functionToCall] = getFunctionCalls(promptResult)
+
   if (
-    !toolCaller.choices[0] ||
-    !toolCaller.choices[0].message ||
-    !toolCaller.choices[0].message.tool_calls ||
-    !toolCaller.choices[0].message.tool_calls[0].function
+    !functionToCall
   ) {
     console.log("No tool call found");
-    // No tool to call, so just call the model with the original messages
-    const stream = await capiClient.chat.completions.create({
-      stream: true,
-      model: "gpt-4",
-      // @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
+
+    const { stream } = await prompt.stream({
       messages: payload.messages,
-    });
+      token: apiKey,
+    })
 
     for await (const chunk of stream) {
-      const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
-      response.write(chunkStr);
+      response.write(new TextDecoder().decode(chunk));
     }
-    response.write("data: [DONE]\n\n");
-    response.end();
+
+    response.end(createDoneEvent().toString());
     return;
   }
 
-  const functionToCall = toolCaller.choices[0].message.tool_calls[0].function;
-  const args = JSON.parse(functionToCall.arguments);
+  const args = JSON.parse(functionToCall.function.arguments);
 
   console.time("function-exec");
   let functionCallRes: RunnerResponse;
   try {
-    console.log("Executing function", functionToCall.name);
+    console.log("Executing function", functionToCall.function.name);
     const funcClass = functions.find(
-      (f) => f.definition.name === functionToCall.name
+      (f) => f.definition.name === functionToCall.function.name
     );
     if (!funcClass) {
       throw new Error("Unknown function");
     }
 
     console.log("\t with args", args);
     const func = new funcClass(modelsAPI);
-    // @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
     functionCallRes = await func.execute(payload.messages, args);
   } catch (err) {
     console.error(err);
@@ -150,23 +136,20 @@ const server = createServer(async (request, response) => {
   console.timeEnd("function-exec");
 
   try {
-    const stream = await modelsAPI.inference.chat.completions.create({
+    console.time("streaming");
+    const { stream } = await prompt.stream({
+      endpoint: 'https://models.inference.ai.azure.com/chat/completions',
       model: functionCallRes.model,
       messages: functionCallRes.messages,
-      stream: true,
-      stream_options: {
-        include_usage: false,
-      },
-    });
+      token: apiKey,
+    })
 
-    console.time("streaming");
     for await (const chunk of stream) {
-      const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
-      response.write(chunkStr);
+      response.write(new TextDecoder().decode(chunk));
     }
-    response.write("data: [DONE]\n\n");
+
+    response.end(createDoneEvent().toString());
     console.timeEnd("streaming");
-    response.end();
   } catch (err) {
     console.error(err);
     response.statusCode = 500
@@ -178,12 +161,12 @@ const port = process.env.PORT || "3000"
 server.listen(port);
 console.log(`Server running at http://localhost:${port}`);
 
-function getBody(request: IncomingMessage): Promise<string> {
+function getBody(request: any): Promise<string> {
   return new Promise((resolve) => {
     const bodyParts: any[] = [];
     let body;
     request
-      .on("data", (chunk) => {
+      .on("data", (chunk: Buffer) => {
         bodyParts.push(chunk);
       })
       .on("end", () => {

src/models-api.ts (−10)

@@ -1,5 +1,3 @@
-import OpenAI from "openai";
-
 // Model is the structure of a model in the model catalog.
 export interface Model {
   id: string;
@@ -33,16 +31,8 @@ export type ModelSchemaParameter = {
 };
 
 export class ModelsAPI {
-  inference: OpenAI;
   private _models: Model[] | null = null;
 
-  constructor(apiKey: string) {
-    this.inference = new OpenAI({
-      baseURL: "https://models.inference.ai.azure.com",
-      apiKey,
-    });
-  }
-
   async getModel(modelName: string): Promise<Model> {
     const modelRes = await fetch(
       "https://modelcatalog.azure-api.net/v1/model/" + modelName
