forked from pollinations/pollinations
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgenerateTextPortkey.js
More file actions
165 lines (144 loc) · 5.5 KB
/
generateTextPortkey.js
File metadata and controls
165 lines (144 loc) · 5.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
import dotenv from "dotenv";
import { genericOpenAIClient } from "./genericOpenAIClient.js";
import debug from "debug";
import { resolveModelConfig } from "./utils/modelResolver.js";
import { generateHeaders } from "./transforms/headerGenerator.js";
import { sanitizeMessages } from "./transforms/messageSanitizer.js";
import { createImageUrlToBase64Transform } from "./transforms/imageUrlToBase64Transform.js";
import { processParameters } from "./transforms/parameterProcessor.js";
import { findModelByName } from "./availableModels.js";
// Load environment variables (PORTKEY_GATEWAY_URL, PORTKEY_API_KEY) from .env
dotenv.config();
// Namespaced debug loggers; enable with DEBUG=pollinations:portkey* in the environment
export const log = debug("pollinations:portkey");
const errorLog = debug("pollinations:portkey:error");
// Model mapping is now handled via mappedModel field in availableModels.js
// Default options
const DEFAULT_OPTIONS = {
    model: "openai-fast", // fallback model when the caller specifies none
    jsonMode: false, // plain-text responses unless JSON mode is explicitly enabled
};
/**
 * Configuration object for the Portkey client.
 *
 * Shared across all requests; per-request headers are injected via a shallow
 * copy made in generateTextPortkey, so `additionalHeaders` stays empty here.
 */
const clientConfig = {
    // Use Portkey API Gateway URL from .env with fallback to localhost
    endpoint: () =>
        `${process.env.PORTKEY_GATEWAY_URL || "http://localhost:8787"}/v1/chat/completions`,
    // Auth header configuration
    authHeaderName: "Authorization",
    authHeaderValue: () => {
        // Use the actual Portkey API key from environment variables
        return `Bearer ${process.env.PORTKEY_API_KEY}`;
    },
    // Per-request headers are set on a copy of this config in generateTextPortkey
    additionalHeaders: {},
    // Default options
    defaultOptions: DEFAULT_OPTIONS,
};
/**
 * Generates text using a local Portkey gateway with Azure OpenAI models
 * via OpenAI-compatible endpoints.
 *
 * Pipeline: per-model transform hook → model resolution → header generation →
 * image-URL-to-base64 conversion → message sanitization → parameter
 * processing, then the request is handed to genericOpenAIClient.
 *
 * @param {Array<Object>} messages - Array of chat message objects
 * @param {Object} [options={}] - Options for text generation (model, jsonMode, ...)
 * @returns {Promise<Object>} OpenAI-compatible completion response
 * @throws Re-throws any error raised by a model transform or a pipeline step
 */
export async function generateTextPortkey(messages, options = {}) {
    // Copy options so the caller's object is never mutated.
    let processedOptions = { ...options };
    let processedMessages = messages;

    // Apply the model-specific transform hook, if the model defines one.
    if (processedOptions.model) {
        ({ messages: processedMessages, options: processedOptions } =
            applyModelTransform(processedMessages, processedOptions));
    }

    // Run the shared request-transformation pipeline. The model is re-checked
    // because a transform hook may have cleared it.
    if (processedOptions.model) {
        ({ messages: processedMessages, options: processedOptions } =
            await runTransformPipeline(processedMessages, processedOptions));
    }

    // Create a fresh config with clean headers for this request, and remove
    // additionalHeaders from the options since it now lives in the config.
    const { additionalHeaders, ...finalOptions } = processedOptions;
    const requestConfig = {
        ...clientConfig,
        additionalHeaders: additionalHeaders || {},
    };

    return await genericOpenAIClient(
        processedMessages,
        finalOptions,
        requestConfig,
    );
}

/**
 * Applies the model's own `transform` hook (declared via availableModels.js).
 * No-op when the model has no transform. Returns { messages, options } with
 * the hook's options merged over the incoming ones.
 */
function applyModelTransform(messages, options) {
    const modelDef = findModelByName(options.model);
    if (!modelDef?.transform) {
        return { messages, options };
    }
    try {
        const { messages: transformedMessages, options: transformedOptions } =
            modelDef.transform(messages, options);
        return {
            messages: transformedMessages,
            // Merge transformed options over the caller's options.
            options: { ...options, ...transformedOptions },
        };
    } catch (error) {
        errorLog("Error applying transform:", error);
        throw error;
    }
}

/**
 * Runs the shared request transformations sequentially:
 * 1. resolveModelConfig   2. generateHeaders
 * 3. image URL → base64 (for Vertex AI)   4. sanitizeMessages
 * 5. processParameters (limit checking removed — handled by
 *    enter.pollinations.ai).
 * Returns the final { messages, options } pair.
 */
async function runTransformPipeline(messages, options) {
    try {
        let result = resolveModelConfig(messages, options);
        logStep("resolveModelConfig", result.options);

        result = await generateHeaders(result.messages, result.options);
        logStep("generateHeaders", result.options);

        const imageUrlTransform = createImageUrlToBase64Transform();
        result = await imageUrlTransform(result.messages, result.options);
        logStep("imageUrlTransform", result.options);

        result = sanitizeMessages(result.messages, result.options);
        logStep("sanitizeMessages", result.options);

        return processParameters(result.messages, result.options);
    } catch (error) {
        errorLog("Error in request transformation:", error);
        throw error;
    }
}

// Debug-logs whether modelDef/modelConfig are still present after a pipeline step.
function logStep(step, options) {
    log(`After ${step}:`, !!options.modelDef, !!options.modelConfig);
}