Skip to content

Commit bee45e7

Browse files
committed
bring back openai
1 parent 6a5563f commit bee45e7

9 files changed

+519
-44
lines changed

package-lock.json

+461-4
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

+2-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,8 @@
1212
"license": "ISC",
1313
"description": "",
1414
"dependencies": {
15-
"@copilot-extensions/preview-sdk": "^3.0.0"
15+
"@copilot-extensions/preview-sdk": "^3.0.0",
16+
"openai": "^4.55.0"
1617
},
1718
"devDependencies": {
1819
"@types/express": "^4.17.21",

src/functions.ts

+6-7
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
import type { PromptFunction, InteropMessage } from "@copilot-extensions/preview-sdk";
2-
1+
import OpenAI from "openai";
32
import { ModelsAPI } from "./models-api.js";
43

54
// defaultModel is the model used for internal calls - for tool calling,
@@ -9,26 +8,26 @@ export const defaultModel = "gpt-4o-mini";
98
// RunnerResponse is the response from a function call.
109
export interface RunnerResponse {
1110
model: string;
12-
messages: InteropMessage[];
11+
messages: OpenAI.ChatCompletionMessageParam[];
1312
}
1413

1514
export abstract class Tool {
1615
modelsAPI: ModelsAPI;
17-
static definition: PromptFunction["function"];
16+
static definition: OpenAI.FunctionDefinition;
1817

1918
constructor(modelsAPI: ModelsAPI) {
2019
this.modelsAPI = modelsAPI;
2120
}
2221

23-
static get tool(): PromptFunction {
22+
static get tool(): OpenAI.Chat.Completions.ChatCompletionTool {
2423
return {
2524
type: "function",
2625
function: this.definition,
2726
};
2827
}
2928

3029
abstract execute(
31-
messages: InteropMessage[],
32-
args: Record<string, unknown>
30+
messages: OpenAI.ChatCompletionMessageParam[],
31+
args: object
3332
): Promise<RunnerResponse>;
3433
}

src/functions/describe-model.ts

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2-
1+
import OpenAI from "openai";
32
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
43

54
export class describeModel extends Tool {
@@ -20,7 +19,7 @@ export class describeModel extends Tool {
2019
};
2120

2221
async execute(
23-
messages: InteropMessage[],
22+
messages: OpenAI.ChatCompletionMessageParam[],
2423
args: { model: string }
2524
): Promise<RunnerResponse> {
2625
const [model, modelSchema] = await Promise.all([

src/functions/execute-model.ts

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
1-
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2-
1+
import OpenAI from "openai";
32
import { RunnerResponse, Tool } from "../functions.js";
43

5-
type MessageWithReferences = InteropMessage & {
4+
type MessageWithReferences = OpenAI.ChatCompletionMessageParam & {
65
copilot_references: Reference[];
76
};
87

src/functions/list-models.ts

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2-
1+
import OpenAI from "openai";
32
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
43

54
export class listModels extends Tool {
@@ -16,7 +15,7 @@ export class listModels extends Tool {
1615
};
1716

1817
async execute(
19-
messages: InteropMessage[]
18+
messages: OpenAI.ChatCompletionMessageParam[]
2019
): Promise<RunnerResponse> {
2120
const models = await this.modelsAPI.listModels();
2221

src/functions/recommend-model.ts

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
import type { InteropMessage } from "@copilot-extensions/preview-sdk";
2-
1+
import OpenAI from "openai";
32
import { RunnerResponse, defaultModel, Tool } from "../functions.js";
43

54
export class recommendModel extends Tool {
@@ -16,7 +15,7 @@ export class recommendModel extends Tool {
1615
};
1716

1817
async execute(
19-
messages: InteropMessage[]
18+
messages: OpenAI.ChatCompletionMessageParam[]
2019
): Promise<RunnerResponse> {
2120
const models = await this.modelsAPI.listModels();
2221

src/index.ts

+32-20
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
1-
import { createServer } from "node:http";
1+
import { createServer, type IncomingMessage } from "node:http";
22

3-
import { prompt, getFunctionCalls, createDoneEvent, verifyAndParseRequest } from "@copilot-extensions/preview-sdk";
3+
import { getFunctionCalls, createDoneEvent, verifyAndParseRequest } from "@copilot-extensions/preview-sdk";
4+
import OpenAI from "openai";
45

56
import { describeModel } from "./functions/describe-model.js";
67
import { executeModel } from "./functions/execute-model.js";
@@ -52,9 +53,15 @@ const server = createServer(async (request, response) => {
5253
}
5354

5455
// List of functions that are available to be called
55-
const modelsAPI = new ModelsAPI();
56+
const modelsAPI = new ModelsAPI(apiKey);
5657
const functions = [listModels, describeModel, executeModel, recommendModel];
5758

59+
// Use the Copilot API to determine which function to execute
60+
const capiClient = new OpenAI({
61+
baseURL: "https://api.githubcopilot.com",
62+
apiKey,
63+
});
64+
5865
// Prepend a system message that includes the list of models, so that
5966
// tool calls can better select the right model to use.
6067
const models = await modelsAPI.listModels();
@@ -82,27 +89,29 @@ const server = createServer(async (request, response) => {
8289
].concat(payload.messages);
8390

8491
console.time("tool-call");
85-
const promptResult = await prompt({
92+
const toolCaller = await capiClient.chat.completions.create({
8693
messages: toolCallMessages,
87-
token: apiKey,
94+
stream: false,
95+
model: "gpt-4",
8896
tools: functions.map((f) => f.tool),
8997
})
9098
console.timeEnd("tool-call");
9199

92-
const [functionToCall] = getFunctionCalls(promptResult)
100+
const [functionToCall] = getFunctionCalls(toolCaller)
93101

94102
if (
95103
!functionToCall
96104
) {
97105
console.log("No tool call found");
98-
99-
const { stream } = await prompt.stream({
100-
messages: payload.messages,
101-
token: apiKey,
102-
})
106+
// No tool to call, so just call the model with the original messages
107+
const stream = await capiClient.chat.completions.create({
108+
stream: true,
109+
model: "gpt-4",
110+
});
103111

104112
for await (const chunk of stream) {
105-
response.write(new TextDecoder().decode(chunk));
113+
const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
114+
response.write(chunkStr);
106115
}
107116

108117
response.end(createDoneEvent().toString());
@@ -134,16 +143,19 @@ const server = createServer(async (request, response) => {
134143
console.timeEnd("function-exec");
135144

136145
try {
137-
console.time("streaming");
138-
const { stream } = await prompt.stream({
139-
endpoint: 'https://models.inference.ai.azure.com/chat/completions',
146+
const stream = await modelsAPI.inference.chat.completions.create({
140147
model: functionCallRes.model,
141148
messages: functionCallRes.messages,
142-
token: apiKey,
143-
})
149+
stream: true,
150+
stream_options: {
151+
include_usage: false,
152+
},
153+
});
144154

155+
console.time("streaming");
145156
for await (const chunk of stream) {
146-
response.write(new TextDecoder().decode(chunk));
157+
const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
158+
response.write(chunkStr);
147159
}
148160

149161
response.end(createDoneEvent().toString());
@@ -159,9 +171,9 @@ const port = process.env.PORT || "3000"
159171
server.listen(port);
160172
console.log(`Server running at http://localhost:${port}`);
161173

162-
function getBody(request: any): Promise<string> {
174+
function getBody(request: IncomingMessage): Promise<string> {
163175
return new Promise((resolve) => {
164-
const bodyParts: any[] = [];
176+
const bodyParts: Buffer[] = [];
165177
let body;
166178
request
167179
.on("data", (chunk: Buffer) => {

src/models-api.ts

+10
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
import OpenAI from "openai";
2+
13
// Model is the structure of a model in the model catalog.
24
export interface Model {
35
id: string;
@@ -31,8 +33,16 @@ export type ModelSchemaParameter = {
3133
};
3234

3335
export class ModelsAPI {
36+
inference: OpenAI;
3437
private _models: Model[] | null = null;
3538

39+
constructor(apiKey: string) {
40+
this.inference = new OpenAI({
41+
baseURL: "https://models.inference.ai.azure.com",
42+
apiKey,
43+
});
44+
}
45+
3646
async getModel(modelName: string): Promise<Model> {
3747
const modelRes = await fetch(
3848
"https://modelcatalog.azure-api.net/v1/model/" + modelName

0 commit comments

Comments (0)