
Commit 63179e1

Merge pull request #51 from donvito/refactor/providers
Refactor/providers
2 parents 3295aba + 965ccc4

File tree

9 files changed: +654 -639 lines changed

src/services/ai.ts

Lines changed: 38 additions & 96 deletions
@@ -1,10 +1,4 @@
 import { z } from "zod";
-import * as openaiService from "./openai";
-import * as ollamaService from "./ollama";
-import * as anthropicService from "./anthropic";
-import * as openrouterService from "./openrouter";
-import * as lmstudioService from "./lmstudio";
-import * as aigatewayService from "./aigateway";
 import {
   openaiConfig,
   ollamaConfig,
@@ -15,6 +9,8 @@ import {
   isServiceEnabled
 } from "../config/services";
 import { llmRequestSchema } from "../schemas/v1/llm";
+import { serviceRegistry } from "./registry";
+import type { ProviderName } from "./interfaces";
 
 enum Provider {
   openai = 'openai',
@@ -26,7 +22,7 @@ enum Provider {
 }
 
 // Service types
-export type AIService = 'openai' | 'anthropic' | 'ollama' | 'openrouter' | 'lmstudio' | 'aigateway';
+export type AIService = ProviderName;
 
 export interface ImageDescriptionResponse {
   model: string;
@@ -83,28 +79,24 @@ export async function generateImageResponse(
   stream: boolean = false,
   temperature: number = 0.3
 ): Promise<ImageDescriptionResponse> {
-
-  try {
-    let result;
-
-    switch (service) {
-      case 'ollama':
-        result = await ollamaService.describeImage(images, model, stream, temperature);
-        return {
-          ...result,
-          usage: {
-            input_tokens: result.prompt_eval_count || 0,
-            output_tokens: result.eval_count || 0,
-            total_tokens: (result.prompt_eval_count || 0) + (result.eval_count || 0),
-          },
-          service: service
-        };
-      default:
-        throw new Error(`Vision capabilities not supported for service: ${service}`);
-    }
-  } catch (error) {
-    throw error;
+  const provider = serviceRegistry.get(service);
+  if (!provider) {
+    throw new Error(`Provider not registered: ${service}`);
+  }
+  if (typeof provider.describeImage !== 'function') {
+    throw new Error(`Vision capabilities not supported for service: ${service}`);
   }
+
+  const result = await provider.describeImage(images, model, stream, temperature);
+  return {
+    ...result,
+    usage: {
+      input_tokens: result.prompt_eval_count || 0,
+      output_tokens: result.eval_count || 0,
+      total_tokens: (result.prompt_eval_count || 0) + (result.eval_count || 0),
+    },
+    service: service
+  };
 }
 
 /**
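
Note on the hunk above: the refactor replaces the hard-coded 'ollama' case with a runtime capability probe. A minimal TypeScript sketch of the typing this relies on — the AIProvider shape here is an assumption, since src/services/interfaces.ts is changed in this commit but not shown on this page:

// Hypothetical provider shape; the real interface lives in
// src/services/interfaces.ts, which this commit changes but does not show here.
interface AIProvider {
  getAvailableModels(): Promise<string[]>;
  // Optional capability: only vision-capable providers implement it.
  describeImage?(
    images: string[],
    model: string,
    stream: boolean,
    temperature: number
  ): Promise<any>;
}

async function describeWith(provider: AIProvider, images: string[], model: string) {
  if (typeof provider.describeImage !== 'function') {
    throw new Error('Vision capabilities not supported');
  }
  // After the typeof guard, TypeScript narrows the optional method to a callable.
  return provider.describeImage(images, model, false, 0.3);
}
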
@@ -115,26 +107,9 @@ export async function getAvailableModels(service: AIService): Promise<string[]>
     return [];
   }
 
-  switch (service) {
-    case Provider.openai:
-      return openaiService.getAvailableModels();
-
-    case Provider.anthropic:
-      return anthropicService.getAvailableModels();
-
-    case Provider.ollama:
-      return await ollamaService.getAvailableModels();
-
-    case Provider.openrouter:
-      return await openrouterService.getAvailableModels();
-    case Provider.lmstudio:
-      return await lmstudioService.getAvailableModels();
-    case Provider.aigateway:
-      return await aigatewayService.getAvailableModels();
-
-    default:
-      return [];
-  }
+  const provider = serviceRegistry.get(service);
+  if (!provider) return [];
+  return provider.getAvailableModels();
 }
 
 /**
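
The request handlers refactored below all repeat the same resolve-or-throw lookup. Purely as an illustration of the pattern — this helper is not part of the PR — it could be captured once, assuming the serviceRegistry and ProviderName imports that ai.ts now uses:

// Illustrative only, not in this commit. Assumes the imports added in the
// diff above (./registry and ./interfaces).
import { serviceRegistry } from "./registry";
import type { ProviderName } from "./interfaces";

function resolveProvider(name: ProviderName) {
  const provider = serviceRegistry.get(name);
  if (!provider) {
    throw new Error(`Unsupported service: ${name}`);
  }
  return provider;
}

// Usage: resolveProvider(config.provider as ProviderName)
//          .generateChatTextResponse(prompt, model);
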
@@ -208,32 +183,21 @@ export async function processStructuredOutputRequest(
   config: z.infer<typeof llmRequestSchema>,
   temperature: number = 0
 ): Promise<any> {
-  const provider = config.provider;
+  const providerName = config.provider as ProviderName;
   const model = config.model;
 
-  switch (provider) {
-    case Provider.ollama:
-      return await ollamaService.generateChatStructuredResponse(prompt, schema, model, temperature);
-    case Provider.openai:
-      return await openaiService.generateChatStructuredResponse(prompt, schema, model, temperature);
-    case Provider.anthropic:
-      return await anthropicService.generateChatStructuredResponse(prompt, schema, model, temperature);
-    case Provider.openrouter:
-      return await openrouterService.generateChatStructuredResponse(prompt, schema, model, temperature);
-    case Provider.lmstudio:
-      return await lmstudioService.generateChatStructuredResponse(prompt, schema, model, temperature);
-    case Provider.aigateway:
-      return await aigatewayService.generateChatStructuredResponse(prompt, schema, model, temperature);
-    default:
-      throw new Error(`Unsupported service: ${provider}`);
+  const provider = serviceRegistry.get(providerName);
+  if (!provider) {
+    throw new Error(`Unsupported service: ${providerName}`);
   }
+  return provider.generateChatStructuredResponse(prompt, schema, model, temperature);
 }
 
 export async function processTextOutputRequest(
   prompt: string,
   config: z.infer<typeof llmRequestSchema>,
 ): Promise<any> {
-  const provider = config.provider;
+  const providerName = config.provider as ProviderName;
   const model = config.model;
   const stream = config.stream || false;
 
@@ -244,47 +208,25 @@
     return processTextOutputStreamRequest(prompt, config);
   }
 
-  switch (provider) {
-    case Provider.ollama:
-      return await ollamaService.generateChatTextResponse(prompt, model);
-    case Provider.openai:
-      return await openaiService.generateChatTextResponse(prompt, model);
-    case Provider.anthropic:
-      return await anthropicService.generateChatTextResponse(prompt, model);
-    case Provider.openrouter:
-      return await openrouterService.generateChatTextResponse(prompt, model);
-    case Provider.lmstudio:
-      return await lmstudioService.generateChatTextResponse(prompt, model);
-    case Provider.aigateway:
-      return await aigatewayService.generateChatTextResponse(prompt, model);
-    default:
-      throw new Error(`Unsupported service: ${provider}`);
+  const provider = serviceRegistry.get(providerName);
+  if (!provider) {
+    throw new Error(`Unsupported service: ${providerName}`);
   }
+  return provider.generateChatTextResponse(prompt, model);
 }
 
 export async function processTextOutputStreamRequest(
   prompt: string,
   config: z.infer<typeof llmRequestSchema>,
 ): Promise<any> {
-  const provider = config.provider;
+  const providerName = config.provider as ProviderName;
   const model = config.model;
 
   console.log('STREAMING MODEL TO USE', model);
 
-  switch (provider) {
-    case Provider.ollama:
-      return await ollamaService.generateChatTextStreamResponse(prompt, model);
-    case Provider.openai:
-      return await openaiService.generateChatTextStreamResponse(prompt, model);
-    case Provider.anthropic:
-      return await anthropicService.generateChatTextStreamResponse(prompt, model);
-    case Provider.openrouter:
-      return await openrouterService.generateChatTextStreamResponse(prompt, model);
-    case Provider.lmstudio:
-      return await lmstudioService.generateChatTextStreamResponse(prompt, model);
-    case Provider.aigateway:
-      return await aigatewayService.generateChatTextStreamResponse(prompt, model);
-    default:
-      throw new Error(`Unsupported service: ${provider}`);
+  const provider = serviceRegistry.get(providerName);
+  if (!provider) {
+    throw new Error(`Unsupported service: ${providerName}`);
   }
+  return provider.generateChatTextStreamResponse(prompt, model);
 }
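
For context: the serviceRegistry and ProviderName referenced throughout this diff come from src/services/registry.ts and src/services/interfaces.ts, two of the nine files changed in this commit but not shown on this page. A minimal sketch of what a Map-backed registry could look like — the names and shapes are assumptions, not the PR's actual code; the provider-name union is taken from the AIService type deleted above:

// src/services/registry.ts — hypothetical reconstruction, not the PR's code.
import type { AIProvider, ProviderName } from "./interfaces";
import * as openaiService from "./openai";
import * as anthropicService from "./anthropic";
import * as ollamaService from "./ollama";
import * as openrouterService from "./openrouter";
import * as lmstudioService from "./lmstudio";
import * as aigatewayService from "./aigateway";

// The per-provider imports move here from ai.ts, so the dispatcher no
// longer has to know about each provider module.
const providers = new Map<ProviderName, AIProvider>([
  ['openai', openaiService],
  ['anthropic', anthropicService],
  ['ollama', ollamaService],
  ['openrouter', openrouterService],
  ['lmstudio', lmstudioService],
  ['aigateway', aigatewayService],
]);

export const serviceRegistry = {
  get(name: ProviderName): AIProvider | undefined {
    return providers.get(name);
  },
};

Under a shape like this, adding a provider means registering it once instead of extending every switch statement in ai.ts, which is what the 96 deletions in this file amount to.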
