-import { z } from 'zod'
-import { aigatewayConfig } from '../config/services'
+import { z } from 'zod';
+import { aigatewayConfig } from '../config/services';
 import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
-import { generateText, streamText, generateObject } from 'ai'
-import OpenAI from 'openai'
-import type { AIProvider } from './interfaces'
+import { generateText, streamText, generateObject } from 'ai';
+import type { AIProvider } from './interfaces';

-const normalizedBase = (aigatewayConfig.baseURL || '').replace(/\/$/, '')
-const AIGATEWAY_BASE_URL = `${normalizedBase}`
+const normalizedBase = (aigatewayConfig.baseURL || '').replace(/\/$/, '');
+const AIGATEWAY_BASE_URL = `${normalizedBase}`;

 const aigateway = createOpenAICompatible({
   name: 'aigateway',
   baseURL: `${aigatewayConfig.baseURL}`,
   apiKey: `${aigatewayConfig.apiKey}`
-})
+});
+
+class AIGatewayProvider implements AIProvider {
+  name = 'aigateway' as const;
+
+  async generateChatStructuredResponse(
+    prompt: string,
+    schema: z.ZodType,
+    model: string = aigatewayConfig.model,
+    temperature: number = 0
+  ): Promise<any> {
+    try {
+      const result = await generateObject({
+        model: aigateway(model || aigatewayConfig.model),
+        schema,
+        prompt,
+        temperature,
+      });
+
+      return {
+        object: result.object,
+        finishReason: result.finishReason,
+        usage: {
+          promptTokens: result.usage?.promptTokens || 0,
+          completionTokens: result.usage?.completionTokens || 0,
+          totalTokens: result.usage?.totalTokens || 0,
+        },
+        warnings: result.warnings,
+      };
+    } catch (error) {
+      throw new Error(`AI Gateway structured response error: ${error}`);
+    }
+  }
+
+  async generateChatTextResponse(
+    prompt: string,
+    model?: string,
+    temperature: number = 0
+  ): Promise<any> {
+    try {
+      const modelToUse = aigateway(model || aigatewayConfig.chatModel);

-export async function generateChatStructuredResponse<T extends z.ZodType>(
-  prompt: string,
-  schema: T,
-  model: string = aigatewayConfig.model,
-  temperature: number = 0
-): Promise<any> {
-  try {
-    const result = await generateObject({
-      model: aigateway(model || aigatewayConfig.model),
-      schema,
+      const result = await generateText({
+        model: modelToUse,
         prompt,
         temperature,
+        toolChoice: 'none',
       });

-    return {
-      object: result.object,
-      finishReason: result.finishReason,
-      usage: {
-        promptTokens: result.usage?.promptTokens || 0,
-        completionTokens: result.usage?.completionTokens || 0,
-        totalTokens: result.usage?.totalTokens || 0,
-      },
-      warnings: result.warnings,
-    };
-  } catch (error) {
-    throw new Error(`AI Gateway structured response error: ${error}`);
-  }
-}
-
-export async function generateChatTextResponse(
-  prompt: string,
-  model?: string,
-  temperature: number = 0
-): Promise<any> {
-
-  const modelToUse = aigateway(model || aigatewayConfig.chatModel);
-
-  const result = await generateText({
-    model: modelToUse,
-    prompt: prompt,
-    temperature: temperature,
-    toolChoice: 'none',
-  });
-
-  return result;
-}
-
-export async function generateChatTextStreamResponse(
-  prompt: string,
-  model?: string,
-  temperature: number = 0
-): Promise<any> {
-
-  const modelToUse = aigateway(model || aigatewayConfig.chatModel);
-
-  const result = await streamText({
-    model: modelToUse,
-    prompt: prompt,
-    temperature: temperature,
-    toolChoice: 'none',
-  });
-
-  return result;
-}
-
-export async function getAvailableModels(): Promise<string[]> {
-  try {
-    const response = await fetch(`${AIGATEWAY_BASE_URL}/v1/models`)
-    if (!response.ok) return []
-    const data = await response.json()
-    if (Array.isArray(data?.data)) {
-      return data.data.map((m: any) => m.id).filter((id: any) => typeof id === 'string')
+      return result;
+    } catch (error) {
+      console.error('AI Gateway text response error: ', error);
+      throw new Error(`AI Gateway text response error: ${error}`);
     }
-    return []
-  } catch (error) {
-    return []
   }
-}
-
-function parseAiGatewayStructuredResponse<T>(
-  completion: OpenAI.Chat.Completions.ChatCompletion,
-  schema: z.ZodType<T>,
-  modelFallback: string
-) {
-  const choice = Array.isArray(completion?.choices) ? completion.choices[0] : undefined
-  const contentRaw = choice?.message?.content

-  if (typeof contentRaw !== 'string') {
-    throw new Error('AI Gateway returned non-string content for structured response')
-  }
+  async generateChatTextStreamResponse(
+    prompt: string,
+    model?: string,
+    temperature: number = 0
+  ): Promise<any> {
+    try {
+      const modelToUse = aigateway(model || aigatewayConfig.chatModel);

-  let parsedObject: unknown
-  try {
-    parsedObject = JSON.parse(contentRaw)
-  } catch (err) {
-    throw new Error(`Failed to parse assistant JSON content: ${String(err)}`)
-  }
+      const result = await streamText({
+        model: modelToUse,
+        prompt,
+        temperature,
+        toolChoice: 'none',
+      });

-  const validation = schema.safeParse(parsedObject)
-  if (!validation.success) {
-    throw new Error(`Response failed schema validation: ${validation.error.message}`)
+      return result;
+    } catch (error) {
+      console.error('AI Gateway streaming response error: ', error);
+      throw new Error(`AI Gateway streaming response error: ${error}`);
+    }
   }

-  return {
-    object: validation.data,
-    finishReason: (choice as any)?.finish_reason ?? (choice as any)?.finishReason ?? null,
-    usage: {
-      promptTokens: (completion as any)?.usage?.prompt_tokens ?? 0,
-      completionTokens: (completion as any)?.usage?.completion_tokens ?? 0,
-      totalTokens: (completion as any)?.usage?.total_tokens ?? 0,
-    },
-    id: completion?.id,
-    model: (completion as any)?.model ?? modelFallback,
+  async getAvailableModels(): Promise<string[]> {
+    try {
+      const response = await fetch(`${AIGATEWAY_BASE_URL}/v1/models`);
+      if (!response.ok) return [];
+      const data = await response.json();
+      if (Array.isArray(data?.data)) {
+        return data.data
+          .map((m: any) => m.id)
+          .filter((id: any) => typeof id === 'string');
+      }
+      return [];
+    } catch (_error) {
+      return [];
+    }
   }
 }

-const provider: AIProvider = {
-  name: 'aigateway',
-  generateChatStructuredResponse,
-  generateChatTextResponse,
-  generateChatTextStreamResponse,
-  getAvailableModels,
-  // no vision support
-};
+const provider = new AIGatewayProvider();

 export default provider;
-export { AIGATEWAY_BASE_URL }
+export { AIGATEWAY_BASE_URL };
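
For context, a minimal usage sketch of the refactored provider (not part of the diff): it relies only on the default export and method signatures shown above, while the import path, the example schema, and the prompt text are hypothetical.

// Hypothetical consumer of the AIGatewayProvider default export (import path is an assumption).
import { z } from 'zod';
import provider from './aigateway';

// Illustrative schema; any zod schema passed to generateChatStructuredResponse is handled the same way.
const answerSchema = z.object({ answer: z.string() });

async function main() {
  // Structured call: the result's object has been validated against answerSchema.
  const structured = await provider.generateChatStructuredResponse(
    'Reply with JSON of the form { "answer": "..." }',
    answerSchema
  );
  console.log(structured.object, structured.usage);

  // Plain text call: model falls back to aigatewayConfig.chatModel when omitted.
  const text = await provider.generateChatTextResponse('Say hello.');
  console.log(text.text);
}

main().catch(console.error);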