@@ -1,7 +1,6 @@
-import { createServer, IncomingMessage } from "node:http";
+import { createServer } from "node:http";
 
-import { verifyAndParseRequest, createAckEvent } from "@copilot-extensions/preview-sdk";
-import OpenAI from "openai";
+import { prompt, getFunctionCalls, createAckEvent, createDoneEvent, verifyAndParseRequest, createTextEvent } from "@copilot-extensions/preview-sdk";
 
 import { describeModel } from "./functions/describe-model.js";
 import { executeModel } from "./functions/execute-model.js";
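The import change above is the heart of this commit: the hand-rolled OpenAI client is dropped in favor of the preview SDK's `prompt` helper and its SSE event builders. As a rough sketch of how those builders behave (hedged: inferred from their use in this diff, where each returned event is stringified and written straight to the response):

```ts
import { createServer } from "node:http";
import {
  createAckEvent,
  createTextEvent,
  createDoneEvent,
} from "@copilot-extensions/preview-sdk";

// Minimal sketch: each helper returns an event whose toString() is a
// pre-formatted SSE chunk, so a handler can pipe it to the response as-is.
const demo = createServer((request, response) => {
  response.write(createAckEvent().toString()); // tell Copilot the request arrived
  response.write(createTextEvent("Hello from the agent!").toString()); // a text chunk
  response.end(createDoneEvent().toString()); // close the stream
});

demo.listen(3001);
```

`createTextEvent` is imported but does not appear in the hunks below, so its use presumably lives elsewhere in the file.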
@@ -12,6 +11,7 @@ import { ModelsAPI } from "./models-api.js";
 
 const server = createServer(async (request, response) => {
   if (request.method === "GET") {
+    // health check
     response.statusCode = 200;
     response.end(`OK`);
     return;
@@ -55,15 +55,9 @@ const server = createServer(async (request, response) => {
   response.write(createAckEvent().toString());
 
   // List of functions that are available to be called
-  const modelsAPI = new ModelsAPI(apiKey);
+  const modelsAPI = new ModelsAPI();
   const functions = [listModels, describeModel, executeModel, recommendModel];
 
-  // Use the Copilot API to determine which function to execute
-  const capiClient = new OpenAI({
-    baseURL: "https://api.githubcopilot.com",
-    apiKey,
-  });
-
   // Prepend a system message that includes the list of models, so that
   // tool calls can better select the right model to use.
   const models = await modelsAPI.listModels();
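With `prompt()` carrying the caller's token on every request, the `ModelsAPI` constructor no longer needs it, and the `capiClient` pinned to `https://api.githubcopilot.com` goes away entirely. For orientation, the hunks below access `f.tool`, `f.definition.name`, and `new funcClass(modelsAPI).execute(...)` on the entries of `functions`, which suggests a class shape roughly like this (a hedged sketch; the real classes live in `./functions/` and are not shown in this diff):

```ts
// Hypothetical shape inferred from how `functions` is used below.
interface RunnerResponse {
  model: string;
  messages: { role: "system" | "user" | "assistant"; content: string }[];
}

class ExampleFunction {
  // Matched by name when the model asks for a tool call.
  static definition = {
    name: "example_function", // hypothetical name, for illustration only
    description: "Describe what this tool does",
    parameters: { type: "object", properties: {} },
  };

  // Sent to the model so it knows the tool exists.
  static tool = { type: "function" as const, function: ExampleFunction.definition };

  constructor(private modelsAPI: unknown) {}

  async execute(messages: unknown[], args: Record<string, unknown>): Promise<RunnerResponse> {
    // Placeholder body; each real function builds model + messages for the final call.
    return { model: "gpt-4o", messages: [{ role: "system", content: "..." }] };
  }
}
```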
@@ -91,57 +85,48 @@ const server = createServer(async (request, response) => {
   ].concat(payload.messages);
 
   console.time("tool-call");
-  const toolCaller = await capiClient.chat.completions.create({
-    stream: false,
-    model: "gpt-4",
-    // @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
+  const promptResult = await prompt({
     messages: toolCallMessages,
-    tool_choice: "auto",
+    token: apiKey,
     tools: functions.map((f) => f.tool),
-  });
+  })
   console.timeEnd("tool-call");
 
+  const [functionToCall] = getFunctionCalls(promptResult)
+
   if (
-    !toolCaller.choices[0] ||
-    !toolCaller.choices[0].message ||
-    !toolCaller.choices[0].message.tool_calls ||
-    !toolCaller.choices[0].message.tool_calls[0].function
+    !functionToCall
   ) {
     console.log("No tool call found");
-    // No tool to call, so just call the model with the original messages
-    const stream = await capiClient.chat.completions.create({
-      stream: true,
-      model: "gpt-4",
-      // @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
+
+    const { stream } = await prompt.stream({
       messages: payload.messages,
-    });
+      token: apiKey,
+    })
 
     for await (const chunk of stream) {
-      const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
-      response.write(chunkStr);
+      response.write(new TextDecoder().decode(chunk));
     }
-    response.write("data: [DONE]\n\n");
-    response.end();
+
+    response.end(createDoneEvent().toString());
     return;
   }
 
-  const functionToCall = toolCaller.choices[0].message.tool_calls[0].function;
-  const args = JSON.parse(functionToCall.arguments);
+  const args = JSON.parse(functionToCall.function.arguments);
 
   console.time("function-exec");
   let functionCallRes: RunnerResponse;
   try {
-    console.log("Executing function", functionToCall.name);
+    console.log("Executing function", functionToCall.function.name);
     const funcClass = functions.find(
-      (f) => f.definition.name === functionToCall.name
+      (f) => f.definition.name === functionToCall.function.name
    );
     if (!funcClass) {
       throw new Error("Unknown function");
     }
 
     console.log("\t with args", args);
     const func = new funcClass(modelsAPI);
-    // @ts-expect-error - TODO @gr2m - type incompatibility between @openai/api and @copilot-extensions/preview-sdk
     functionCallRes = await func.execute(payload.messages, args);
   } catch (err) {
     console.error(err);
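The old code dug through `choices[0].message.tool_calls` by hand; the new code lets `getFunctionCalls()` do that, and the `@ts-expect-error` workarounds disappear with the OpenAI client. A standalone sketch of the round trip (hedged: the `get_weather` tool and env-sourced token are illustrative, not part of this repo):

```ts
import { prompt, getFunctionCalls } from "@copilot-extensions/preview-sdk";

const result = await prompt({
  messages: [{ role: "user", content: "What's the weather in Berlin?" }],
  token: process.env.GITHUB_TOKEN!, // assumption: a token with Copilot API access
  tools: [
    {
      type: "function",
      function: {
        name: "get_weather", // hypothetical tool, for illustration only
        description: "Look up the current weather for a city",
        parameters: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"],
        },
      },
    },
  ],
});

// Same access pattern the diff switches to: call.function.name / .arguments.
const [call] = getFunctionCalls(result);
if (call) {
  const args = JSON.parse(call.function.arguments);
  console.log("Model requested", call.function.name, "with", args);
} else {
  console.log("No tool call found");
}
```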
@@ -152,23 +137,20 @@ const server = createServer(async (request, response) => {
   console.timeEnd("function-exec");
 
   try {
-    const stream = await modelsAPI.inference.chat.completions.create({
+    console.time("streaming");
+    const { stream } = await prompt.stream({
+      endpoint: 'https://models.inference.ai.azure.com/chat/completions',
       model: functionCallRes.model,
       messages: functionCallRes.messages,
-      stream: true,
-      stream_options: {
-        include_usage: false,
-      },
-    });
+      token: apiKey,
+    })
 
-    console.time("streaming");
     for await (const chunk of stream) {
-      const chunkStr = "data: " + JSON.stringify(chunk) + "\n\n";
-      response.write(chunkStr);
+      response.write(new TextDecoder().decode(chunk));
     }
-    response.write("data: [DONE]\n\n");
+
+    response.end(createDoneEvent().toString());
     console.timeEnd("streaming");
-    response.end();
   } catch (err) {
     console.error(err);
     response.statusCode = 500
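The final inference call also moves to `prompt.stream()`, with an `endpoint` override pointing at GitHub Models instead of the default Copilot API. The chunks it yields are raw SSE bytes, which is why the handler decodes them with `TextDecoder` and forwards them unchanged; the old manual framing (`"data: " + JSON.stringify(chunk) + "\n\n"` plus a trailing `data: [DONE]`) is no longer needed because the SDK and `createDoneEvent()` produce that framing. A trimmed-down sketch of the hand-off (hedged: the model name is a stand-in):

```ts
import { prompt, createDoneEvent } from "@copilot-extensions/preview-sdk";

const { stream } = await prompt.stream({
  endpoint: "https://models.inference.ai.azure.com/chat/completions",
  model: "gpt-4o-mini", // assumption: any model name the endpoint accepts
  messages: [{ role: "user", content: "Say hello" }],
  token: process.env.GITHUB_TOKEN!, // assumption: token sourced from env
});

const decoder = new TextDecoder();
for await (const chunk of stream) {
  // Chunks arrive already SSE-framed, so forward them verbatim.
  process.stdout.write(decoder.decode(chunk, { stream: true }));
}
process.stdout.write(createDoneEvent().toString());
```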
@@ -180,12 +162,12 @@ const port = process.env.PORT || "3000"
 server.listen(port);
 console.log(`Server running at http://localhost:${port}`);
 
-function getBody(request: IncomingMessage): Promise<string> {
+function getBody(request: any): Promise<string> {
   return new Promise((resolve) => {
     const bodyParts: any[] = [];
     let body;
     request
-      .on("data", (chunk) => {
+      .on("data", (chunk: Buffer) => {
         bodyParts.push(chunk);
       })
       .on("end", () => {