// InferenceServerManager.ts
import path from 'path';
import fs from 'fs';
import axios from 'axios';
import { EventEmitter } from 'events';
import { ICompletions, IModel, IParams, InsertionParams,
  CompletionParams, GenerationParams, ModelType, AIRequestType,
  IStreamResponse, ChatHistory, downloadLatestReleaseExecutable,
  buildSolgptPrompt } from "@remix/remix-ai-core"
const { spawn } = require('child_process'); // eslint-disable-line
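
// Manages the lifecycle of a local AI inference server: downloading the
// server executable and model binaries, spawning the server as a child
// process, polling its health, and proxying completion/chat requests to its
// HTTP API on localhost.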
class ServerStatusTimer {
  private intervalId: NodeJS.Timeout | null = null;
  public interval: number;
  private task: () => void;

  constructor(task: () => void, interval: number) {
    this.task = task;
    this.interval = interval;
  }

  start(): void {
    if (this.intervalId === null) {
      this.intervalId = setInterval(() => {
        this.task();
      }, this.interval);
    }
  }

  stop(): void {
    if (this.intervalId !== null) {
      clearInterval(this.intervalId);
      this.intervalId = null;
    }
  }

  isRunning(): boolean {
    return this.intervalId !== null;
  }
}
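
/**
 * Singleton that implements ICompletions against a locally spawned inference
 * server. Intended flow: getInstance() -> init(model) -> request methods
 * (code_completion, code_generation, ...). `isReady` only becomes true after
 * the server acknowledges model initialization.
 */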
export class InferenceManager implements ICompletions {
  isReady: boolean = false
  selectedModels: IModel[] = []
  event: EventEmitter
  modelCacheDir: string = undefined
  serverCacheDir: string = undefined
  private inferenceProcess: any = null
  port = 5501
  inferenceURL = 'http://127.0.0.1:' + this.port
  private static instance = null
  stateTimer: ServerStatusTimer

  private constructor(modelDir: string) {
    this.event = new EventEmitter()
    this.modelCacheDir = path.join(modelDir, 'models')
    this.serverCacheDir = path.join(modelDir, 'inferenceServer')
    this.stateTimer = new ServerStatusTimer(() => { this._processStatus() }, 20000)
  }

  static getInstance(modelDir: string) {
    if (!InferenceManager.instance) {
      if (!modelDir) {
        console.error('A model directory is required to create an InferenceManager instance')
        return null
      }
      console.log('Creating new InferenceManager instance')
      InferenceManager.instance = new InferenceManager(modelDir)
    }
    return InferenceManager.instance
  }
  // init the backend with a new model
  async init(model: IModel) {
    try {
      await this._downloadModel(model)
      if (model.downloadPath === undefined) {
        console.log('Model not downloaded or not found')
        return
      }

      console.log('Model downloaded at', model.downloadPath)
      if (this.inferenceProcess === null) await this._startServer()

      // check if resource requirements are met before initializing the model
      this._handleResources(true)

      console.log('Initializing model request', model.modelType)
      switch (model.modelType) {
      case ModelType.CODE_COMPLETION_INSERTION:
      case ModelType.CODE_COMPLETION: {
        console.log('Initializing Completion Model')
        const res = await this._makeRequest('init_completion', { model_path: model.downloadPath })
        console.log('Code completion init status:', res?.data?.status)
        if (res?.data?.status === 'success') {
          this.isReady = true
          console.log('Completion Model initialized successfully')
        } else {
          this.isReady = false
          console.error('Error initializing the model', res?.data?.error)
        }
        break
      }
      case ModelType.GENERAL: {
        const res = await this._makeRequest('init', { model_path: model.downloadPath })
        if (res?.data?.status === 'success') {
          this.isReady = true
          console.log('General Model initialized successfully')
        } else {
          this.isReady = false
          console.error('Error initializing the model', res?.data?.error)
        }
        break
      }
      }

      this.stateTimer.start() // idempotent, so safe when init runs for both completion and general models
      this.selectedModels.push(model)
    } catch (error) {
      console.error('Error initializing the model', error)
      this.isReady = false
      InferenceManager.instance = null
    }
  }
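
  // Health check driven by `stateTimer`. Note that mutating
  // `stateTimer.interval` does not reschedule the running setInterval, so the
  // doubling below effectively acts as a failure counter that triggers a
  // restart once enough consecutive checks have failed.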
  async _processStatus() {
    // check whether the server is still responding
    const options = { headers: { 'Content-Type': 'application/json' } }
    let running = false
    try {
      const state = await axios.get(this.inferenceURL + '/state', options)
      running = !!state.data?.status
    } catch (error) {
      // connection refused: the server is unreachable
      running = false
    }

    if (!running) {
      console.log('Inference server not running')
      InferenceManager.instance = null
      this.stateTimer.interval += this.stateTimer.interval
      if (this.stateTimer.interval >= 60000) {
        // attempt to restart the server after repeated failures
        console.log('Attempting to restart the server')
        this.stopInferenceServer()
        this._startServer()
        this.stateTimer.interval = 20000
      }
    }
  }
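
  // Queries the server's /sys endpoint and, when `logger` is set, warns if
  // the selected models exceed the machine's memory or GPU capabilities. The
  // free-memory comparison assumes `minSysMemory` uses the same unit as the
  // /sys memory fields.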
  async _handleResources(logger: boolean = false) {
    const options = { headers: { 'Content-Type': 'application/json' } }
    try {
      const res = await axios.get(this.inferenceURL + '/sys', options)
      if (res.data?.status) {
        const maxMemory = res.data.memory.total
        const usedMemory = res.data.memory.used
        const freeMemory = maxMemory - usedMemory
        const gpusAvailable = res.data.gpus

        for (const model of this.selectedModels) {
          if (model.modelReqs.minSysMemory > maxMemory) {
            if (logger) console.warn('Insufficient total system memory for the model')
          }
          if (model.modelReqs.minSysMemory > freeMemory) {
            if (logger) console.warn('Insufficient free memory for the model')
          }
          if (model.modelReqs.GPURequired && gpusAvailable.length < 1) {
            if (logger) console.warn('GPU required for desktop inference but not available')
          }
        }
      }
    } catch (error) {
      console.error('Error checking system resources:', error.message)
    }
  }
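
  // Downloads the model weights into the local cache, emitting 'download'
  // progress events and a 'ready' event once the file is fully written. A
  // cached file is reused as-is; no checksum or size verification is done.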
  async _downloadModel(model: IModel): Promise<string> {
    if (this.modelCacheDir === undefined) {
      console.log('Model cache directory not provided')
      return
    }

    const outputLocationPath = path.join(this.modelCacheDir, model.modelName);
    console.log('Output location path is', outputLocationPath)
    if (fs.existsSync(outputLocationPath)) {
      model.downloadPath = outputLocationPath
      console.log('Model already exists in the output location', outputLocationPath);
      return;
    }

    console.log('Downloading model from', model.downloadUrl);
    // Make a HEAD request to get the file size
    const { headers } = await axios.head(model.downloadUrl);
    const totalSize = parseInt(headers['content-length'], 10);

    // Create a write stream to save the file
    const writer = fs.createWriteStream(outputLocationPath);

    // Start the file download
    const response = await axios({
      method: 'get',
      url: model.downloadUrl,
      responseType: 'stream'
    });

    let downloadedSize = 0;
    response.data.on('data', (chunk: Buffer) => {
      downloadedSize += chunk.length;
      const progress = (Number((downloadedSize / totalSize) * 100).toFixed(2));
      console.log(`Downloaded ${progress}%`);
      this.event.emit('download', progress);
    });
    response.data.pipe(writer);

    return new Promise((resolve, reject) => {
      // only report completion once the file has been fully written to disk
      writer.on('finish', () => {
        model.downloadPath = outputLocationPath
        console.log('Model download complete');
        this.event.emit('ready')
        resolve(outputLocationPath);
      });
      writer.on('error', reject);
    });
  }
  private async _downloadInferenceServer() {
    const execPath = this._getServerPath()
    try {
      if (fs.existsSync(execPath)) {
        console.log('Inference server already downloaded')
        return true
      }
      // wait for the download to finish before checking for the executable
      await downloadLatestReleaseExecutable(process.platform, this.serverCacheDir)
      return fs.existsSync(execPath)
    } catch (error) {
      console.error('Error downloading Inference server:', error)
      return false
    }
  }
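
  // Resolves the platform- and architecture-specific executable name
  // (e.g. InferenceServer-win32.exe, InferenceServer-linux_x64,
  // InferenceServer-linux_arm, InferenceServer-mac), which is expected to
  // match the release assets fetched by downloadLatestReleaseExecutable.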
  private _getServerPath() {
    // get the CPU architecture
    const arch = process.arch
    let exec_suffix = ''
    if (arch === 'x64') {
      exec_suffix = 'x64'
    } else if (arch === 'arm' || arch === 'arm64') {
      exec_suffix = 'arm'
    } else {
      throw new Error('Unsupported CPU architecture')
    }

    // get the platform name and build the path to the server executable
    let exec_name = ''
    if (process.platform === 'win32') {
      exec_name = 'InferenceServer-' + process.platform + '.exe'
    } else if (process.platform === 'linux') {
      exec_name = 'InferenceServer-' + process.platform + '_' + exec_suffix
    } else if (process.platform === 'darwin') {
      exec_name = 'InferenceServer-' + 'mac'
    } else {
      throw new Error('Unsupported platform')
    }
    return path.join(this.serverCacheDir, exec_name);
  }
  private async _handleExistingServer() {
    // if a server from a previous session is still running, shut it down
    try {
      const options = { headers: { 'Content-Type': 'application/json' } }
      const state = await axios.get(this.inferenceURL + '/state', options)
      if (state.data?.status) {
        console.log('Found existing Inference server running')
        this.stopInferenceServer()
        await axios.post(this.inferenceURL + '/kill', {}, options)
      }
    } catch (error) {
      // connection refused means no server is listening on the port
      console.log('No existing Inference server running')
    }
  }
  private async _startServer() {
    const serverAvailable = await this._downloadInferenceServer()
    if (!serverAvailable) {
      console.error('Inference server not available for this platform')
      return
    }

    // kill the existing server if one is running
    await this._handleExistingServer()

    return new Promise<void>((resolve, reject) => {
      let serverPath = ''
      try {
        serverPath = this._getServerPath();
        fs.chmodSync(serverPath, '755')
      } catch (error) {
        console.error('Error resolving server path:', error);
        return reject(error)
      }

      // Check that the executable exists
      if (!fs.existsSync(serverPath)) {
        return reject(new Error(`Inference server not found at ${serverPath}`));
      }

      // Check file permissions
      try {
        fs.accessSync(serverPath, fs.constants.X_OK);
      } catch (err) {
        return reject(new Error(`No execute permission on ${serverPath}`));
      }

      const spawnArgs = [this.port.toString()]; // spawn expects string arguments
      this.inferenceProcess = spawn(serverPath, spawnArgs);

      this.inferenceProcess.stdout.on('data', (data) => {
        console.log(`Inference server output: ${data}`);
        if (data.includes('Running on http://')) {
          console.log('Inference server started successfully');
          resolve();
        }
      });

      this.inferenceProcess.stderr.on('data', (data) => {
        console.error(`Inference log: ${data}`);
        if (data.includes('Address already in use')) {
          console.error(`Port ${this.port} is already in use. Please stop the existing server and try again`);
          return reject(new Error(`Port ${this.port} is already in use`));
        }
        // the server may log non-fatal startup output to stderr, so any other
        // stderr output resolves the start promise rather than hanging
        resolve();
      });

      this.inferenceProcess.on('error', (err) => {
        console.error('Failed to start Inference server:', err);
        reject(err);
      });

      this.inferenceProcess.on('close', (code) => {
        console.log(`Inference server process exited with code ${code}`);
        if (code !== 0) {
          reject(new Error(`Inference server exited with code ${code}`));
        }
      });
    });
  }
  stopInferenceServer() {
    if (this.inferenceProcess) {
      this.inferenceProcess.kill();
      this.inferenceProcess = null;
    }
  }
  private async _makeInferenceRequest(endpoint, payload, rType: AIRequestType) {
    try {
      this.event.emit('onInference')
      const options = { headers: { 'Content-Type': 'application/json' } }
      const response = await axios.post(`${this.inferenceURL}/${endpoint}`, payload, options)
      const userPrompt = payload[Object.keys(payload)[0]]
      this.event.emit('onInferenceDone')

      if (response.data?.generatedText) {
        if (rType === AIRequestType.GENERAL) {
          ChatHistory.pushHistory(userPrompt, response.data.generatedText)
        }
        return response.data.generatedText
      } else { return "" }
    } catch (error) {
      ChatHistory.clearHistory()
      console.error('Error making request to Inference server:', error.message);
    }
  }
  private async _streamInferenceRequest(endpoint, payload) {
    try {
      this.event.emit('onInference')
      const response = await axios({
        method: 'post',
        url: `${this.inferenceURL}/${endpoint}`,
        data: payload,
        headers: {
          "Content-Type": "application/json",
          "Accept": "text/event-stream",
        },
        responseType: 'stream'
      });

      const userPrompt = payload.prompt
      let resultText = ""
      if (payload.return_stream_response) {
        return response
      }

      response.data.on('data', (chunk: Buffer) => {
        try {
          const parsedData = JSON.parse(chunk.toString());
          if (parsedData.isGenerating) {
            this.event.emit('onStreamResult', parsedData.generatedText);
            resultText = resultText + parsedData.generatedText
          } else {
            // final chunk: no additional check for streamed results
            resultText = resultText + parsedData.generatedText
            ChatHistory.pushHistory(userPrompt, resultText)
          }
        } catch (error) {
          ChatHistory.clearHistory()
          console.error('Error parsing JSON:', error);
        }
      });

      return "" // return an empty string for now; the result is delivered via events
    } catch (error) {
      ChatHistory.clearHistory()
      console.error('Error making stream request to Inference server:', error.message);
    } finally {
      this.event.emit('onInferenceDone')
    }
  }
  private async _makeRequest(endpoint, payload) {
    // makes a simple request to the inference server
    try {
      const options = { headers: { 'Content-Type': 'application/json' } }
      const response = await axios.post(`${this.inferenceURL}/${endpoint}`, payload, options)
      this.event.emit('onInferenceDone')
      return response
    } catch (error) {
      console.error('Error making request to Inference server:', error.message);
    }
  }
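
  // ---- public inference API ----
  // The methods below build endpoint-specific payloads and delegate to
  // _makeInferenceRequest or, when params.stream_result is set, to
  // _streamInferenceRequest. All of them except generate() bail out until
  // init() has marked the manager ready.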
  async code_completion(prompt, promptAfter, ctxFiles, currentFileName, params: IParams = CompletionParams): Promise<any> {
    if (!this.isReady) {
      console.log('model not ready yet')
      return
    }
    // as of now, no prompt template is required
    const payload = { prompt, 'context': promptAfter, ctxFiles, currentFileName, ...params }
    return this._makeInferenceRequest('code_completion', payload, AIRequestType.COMPLETION)
  }
  async code_insertion(msg_pfx: string, msg_sfx: string, ctxFiles, currentFileName, params: IParams = InsertionParams): Promise<any> {
    if (!this.isReady) {
      console.log('model not ready yet')
      return
    }
    const payload = { code_pfx: msg_pfx, code_sfx: msg_sfx, ctxFiles, currentFileName, ...params }
    return this._makeInferenceRequest('code_insertion', payload, AIRequestType.COMPLETION)
  }
  async code_generation(prompt: string, params: IParams = GenerationParams): Promise<any> {
    if (!this.isReady) {
      console.log('model not ready yet')
      return
    }
    return this._makeInferenceRequest('code_generation', { prompt, ...params }, AIRequestType.GENERAL)
  }
  async code_explaining(code: string, context: string, params: IParams = GenerationParams): Promise<any> {
    if (!this.isReady) {
      console.log('model not ready yet')
      return
    }
    if (params.stream_result) {
      return this._streamInferenceRequest('code_explaining', { prompt: code, context, ...params })
    } else {
      return this._makeInferenceRequest('code_explaining', { prompt: code, context, ...params }, AIRequestType.GENERAL)
    }
  }
  async error_explaining(prompt: string, params: IParams = GenerationParams): Promise<any> {
    if (!this.isReady) {
      console.log('model not ready yet')
      return ""
    }
    if (params.stream_result) {
      return this._streamInferenceRequest('error_explaining', { prompt, ...params })
    } else {
      return this._makeInferenceRequest('error_explaining', { prompt, ...params }, AIRequestType.GENERAL)
    }
  }
  async solidity_answer(userPrompt: string, params: IParams = GenerationParams): Promise<any> {
    if (!this.isReady) {
      console.log('model not ready yet')
      return
    }

    // use the prompt format of the loaded general model, if one is selected
    let modelOP = undefined
    for (const model of this.selectedModels) {
      if (model.modelType === ModelType.GENERAL) {
        modelOP = model.modelOP
      }
    }
    const prompt = buildSolgptPrompt(userPrompt, modelOP)

    if (params.stream_result) {
      return this._streamInferenceRequest('solidity_answer', { prompt, ...params })
    } else {
      return this._makeInferenceRequest('solidity_answer', { prompt, ...params }, AIRequestType.GENERAL)
    }
  }
  async generate(userPrompt, options: IParams = GenerationParams): Promise<any> {
    const payload = { prompt: userPrompt, "endpoint": "generate", ...options }
    if (options.stream_result) return this._streamInferenceRequest('generate', payload)
    else return this._makeRequest('generate', payload)
  }
}
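
// A minimal usage sketch. Assumptions: this runs in a Node/Electron main
// process, `someModel` comes from the remix-ai-core model catalogue, and the
// cache directory is writable; the names below are illustrative only.
//
// const manager = InferenceManager.getInstance('/path/to/ai-cache')
// await manager.init(someModel) // downloads model + server, spawns the process
// manager.event.on('onStreamResult', (text) => process.stdout.write(text))
// const answer = await manager.solidity_answer('What is a modifier?')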