forked from pollinations/pollinations
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgenericOpenAIClient.js
More file actions
305 lines (263 loc) · 11 KB
/
genericOpenAIClient.js
File metadata and controls
305 lines (263 loc) · 11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
import fetch from "node-fetch";
import debug from "debug";
import {
validateAndNormalizeMessages,
cleanNullAndUndefined,
generateRequestId,
cleanUndefined,
normalizeOptions,
convertSystemToUserMessages,
} from "./textGenerationUtils.js";
import { createSseStreamConverter } from "./sseStreamConverter.js";
// Namespaced debug loggers; enable via the DEBUG env var,
// e.g. DEBUG=pollinations:genericopenai or DEBUG=pollinations:error
const log = debug(`pollinations:genericopenai`);
const errorLog = debug(`pollinations:error`);
/**
 * Build an Error describing a non-OK upstream response.
 *
 * Consumes the response body: tries to parse it as JSON for structured
 * error details, falling back to the raw text when the body is not JSON.
 *
 * @param {Object} response - The non-OK fetch response
 * @param {string} modelName - Model name attached to the error for reporting
 * @returns {Promise<Error>} Error with `.status`, `.details` and `.model` set
 */
async function buildApiError(response, modelName) {
    const errorText = await response.text();
    let errorDetails;
    try {
        errorDetails = JSON.parse(errorText);
    } catch {
        // Body was not JSON; keep the raw text as the details
        errorDetails = errorText;
    }
    // Build a cleaner error message
    const error = new Error(`${response.status} ${response.statusText}`);
    error.status = response.status;
    error.details = errorDetails;
    error.model = modelName;
    return error;
}

/**
 * Generic OpenAI-compatible API client function
 *
 * Sends a chat-completion request to an OpenAI-compatible endpoint and
 * returns either the parsed JSON completion, or — when `options.stream`
 * is truthy — a chunk-shaped wrapper exposing the (optionally transformed)
 * SSE response stream on `responseStream`.
 *
 * @param {Array} messages - Array of messages for the conversation
 * @param {Object} options - Options for the request (default: {})
 * @param {Object} config - Configuration for the client
 * @param {string|Function} config.endpoint - API endpoint URL or function that returns the URL
 * @param {string} config.authHeaderName - Name of the auth header (default: 'Authorization')
 * @param {Function} config.authHeaderValue - Function that returns the auth header value
 * @param {Object} config.defaultOptions - Default options for the client
 * @param {Function} config.formatResponse - Optional function to format the response
 * @param {Object} config.additionalHeaders - Optional additional headers to include in requests
 * @returns {Object} - API response object
 * @throws {Error} When the API key is missing or the upstream API returns a non-OK status
 */
export async function genericOpenAIClient(messages, options = {}, config) {
    const {
        endpoint,
        authHeaderName = "Authorization",
        authHeaderValue,
        defaultOptions = {},
        formatResponse = null,
        additionalHeaders = {},
    } = config;
    const startTime = Date.now();
    const requestId = generateRequestId();
    log(`[${requestId}] Starting generic openai generation request`, {
        timestamp: new Date().toISOString(),
        messageCount: messages?.length || 0,
        options,
    });
    // Declared in outer scope so they're available in the catch block below
    let normalizedOptions;
    let modelName;
    try {
        // Check if API key is available
        if (!authHeaderValue()) {
            throw new Error(`Generic OpenAI API key is not set`);
        }
        // Normalize options with defaults
        normalizedOptions = normalizeOptions(options, defaultOptions);
        // Use the model name directly (mapping is now handled upstream)
        modelName = normalizedOptions.model;
        // Validate and normalize messages.
        // System-message handling is done via transforms before reaching this client.
        const validatedMessages = validateAndNormalizeMessages(messages);
        // Build request body using spread - normalization already handled upstream
        const requestBody = {
            model: modelName,
            messages: validatedMessages,
            ...normalizedOptions,
        };
        // Clean undefined and null values
        const finalRequestBody = cleanNullAndUndefined(requestBody);
        log(
            `[${requestId}] Cleaned request body (removed null and undefined values):`,
            JSON.stringify(finalRequestBody, null, 2),
        );
        log(`[${requestId}] Sending request to Generic OpenAI API`, {
            timestamp: new Date().toISOString(),
            model: finalRequestBody.model,
            maxTokens: finalRequestBody.max_tokens,
            temperature: finalRequestBody.temperature,
        });
        log(
            `[${requestId}] Final request body:`,
            JSON.stringify(finalRequestBody, null, 2),
        );
        // Determine the endpoint URL (may depend on the model/options)
        const endpointUrl =
            typeof endpoint === "function"
                ? endpoint(modelName, normalizedOptions)
                : endpoint;
        // Prepare headers
        const headers = {
            [authHeaderName]: authHeaderValue(),
            "Content-Type": "application/json",
            ...additionalHeaders,
        };
        // Remove the additionalHeaders property from the request body as it's not part of the API
        if (finalRequestBody.additionalHeaders) {
            delete finalRequestBody.additionalHeaders;
        }
        log(`[${requestId}] Request headers:`, headers);
        log(
            `[${requestId}] Request body:`,
            JSON.stringify(finalRequestBody, null, 2),
        );
        // Make API request
        const response = await fetch(endpointUrl, {
            method: "POST",
            headers,
            body: JSON.stringify(finalRequestBody),
        });
        // Handle streaming response
        if (normalizedOptions.stream) {
            log(
                `[${requestId}] Streaming response from Generic OpenAI API, status: ${response.status}, statusText: ${response.statusText}`,
            );
            const responseHeaders = Object.fromEntries([
                ...response.headers.entries(),
            ]);
            // A non-OK status carries an error payload, not an event stream
            if (!response.ok) {
                throw await buildApiError(response, modelName);
            }
            log(`[${requestId}] Streaming response headers:`, responseHeaders);
            let streamToReturn = response.body;
            if (response.body && formatResponse) {
                // Map each SSE event chunk's delta through formatResponse
                streamToReturn = response.body.pipe(
                    createSseStreamConverter((json) => {
                        // Defensive: extract delta from OpenAI chunk
                        const delta = json?.choices?.[0]?.delta;
                        if (!delta) return json; // fallback: passthrough
                        // Some formatResponse expect the full chunk, some just the delta
                        // We'll pass the delta as the first arg, and the full chunk as second if needed
                        let mapped = formatResponse(delta, json);
                        // If formatResponse returns null/undefined, fallback to original delta
                        if (mapped == null) mapped = delta;
                        // Re-wrap in OpenAI chunk structure for downstream
                        return {
                            ...json,
                            choices: [
                                {
                                    ...json.choices[0],
                                    delta: mapped,
                                },
                            ],
                        };
                    }),
                );
            }
            return {
                id: `genericopenai-${requestId}`,
                object: "chat.completion.chunk",
                created: Math.floor(startTime / 1000),
                model: modelName,
                stream: true,
                responseStream: streamToReturn, // the (possibly transformed) SSE stream
                choices: [
                    {
                        delta: { content: "" },
                        finish_reason: null,
                        index: 0,
                    },
                ],
                // Always undefined here (non-OK responses throw above);
                // kept so the returned object's shape stays unchanged.
                error: !response.ok
                    ? {
                          message: `Generic OpenAI API error: ${response.status} ${response.statusText}`,
                      }
                    : undefined,
            };
        }
        log(`[${requestId}] Received response from Generic OpenAI API`, {
            timestamp: new Date().toISOString(),
            status: response.status,
            statusText: response.statusText,
            headers: Object.fromEntries([...response.headers.entries()]),
        });
        // Handle error responses
        if (!response.ok) {
            const error = await buildApiError(response, modelName);
            errorLog(
                `[${requestId}] Error from Generic OpenAI API:`,
                error.details,
            );
            errorLog(
                `[${requestId}] Error from Generic OpenAI API: messages roles:`,
                messages.map((m) => m.role),
            );
            throw error;
        }
        // Parse response
        const data = await response.json();
        log(
            `[${requestId}] Parsed JSON response:`,
            JSON.stringify(data).substring(0, 500) + "...",
        );
        const completionTime = Date.now() - startTime;
        const modelUsed = data.model || modelName;
        log(`[${requestId}] Successfully generated text`, {
            timestamp: new Date().toISOString(),
            completionTimeMs: completionTime,
            modelUsed,
            // Pass the complete usage object instead of extracting fields
            usage: data.usage,
        });
        // Use custom response formatter if provided.
        // Pass only choices[0] to formatResponse, reconstruct after.
        const originalChoice =
            data.choices && data.choices[0] ? data.choices[0] : {};
        const formattedChoice = formatResponse
            ? formatResponse(originalChoice, requestId, startTime, modelName)
            : originalChoice;
        // Default response formatting: ensure the response has all expected fields
        if (!data.id) {
            log(`[${requestId}] Adding missing id field to response`);
            data.id = `genericopenai-${requestId}`;
        }
        if (!data.object) {
            data.object = "chat.completion";
        }
        // Reconstruct the response object with the formatted choice.
        // The spread of `data` preserves non-standard fields such as
        // `citations` (from Perplexity) when present.
        return {
            ...data,
            choices: [formattedChoice],
        };
    } catch (error) {
        errorLog(`[${requestId}] Error in text generation`, {
            timestamp: new Date().toISOString(),
            error: error.message,
            status: error.status,
            model: modelName,
            provider: config.provider,
            requestId,
        });
        // Rethrow unchanged so callers can inspect .status/.details/.model
        throw error;
    }
}