Skip to content

Commit 35b34ad

Browse files
committed
update openai models including o3 and gpt-4.1
1 parent da3d1e3 commit 35b34ad

File tree

4 files changed

+30
-29
lines changed

4 files changed

+30
-29
lines changed

src/modules/llms/api/openai.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -83,7 +83,7 @@ type ConversationOutput = Omit<ChatConversation, 'timestamp' | 'model' | 'id' |
8383

8484
export const prepareConversation = (conversation: ChatConversation[], model: string, ctx: OnMessageContext | OnCallBackQueryData): ConversationOutput[] => {
8585
const messages = conversation.filter(c => c.model === model).map(m => { return { content: m.content, role: m.role } })
86-
if (messages.length !== 1 || model === LlmModelsEnum.O1 || model.includes('deep')) {
86+
if (messages.length !== 1 || model === LlmModelsEnum.O3 || model.includes('deep')) {
8787
return messages
8888
}
8989
const systemMessage = {

src/modules/llms/llmsBase.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -410,7 +410,7 @@ export abstract class LlmsBase implements PayableBot {
410410
const parameters = this.modelManager.getModelParameters(model)
411411
const response = await this.chatCompletion(conversation, model, ctx, usesTools, parameters)
412412
if (response.completion) {
413-
if (model === this.modelsEnum.O1) {
413+
if (model === this.modelsEnum.O3) {
414414
const msgs = splitTelegramMessage(response.completion.content as string)
415415
await ctx.api.editMessageText(
416416
ctx.chat.id,

src/modules/llms/openaiBot.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -88,7 +88,7 @@ export class OpenAIBot extends LlmsBase {
8888
usesTools: boolean,
8989
parameters?: ModelParameters
9090
): Promise<LlmCompletion> {
91-
return await chatCompletion(conversation, model, ctx, model !== this.modelsEnum.O1, parameters) // limitTokens doesn't apply for o1-preview
91+
return await chatCompletion(conversation, model, ctx, model !== this.modelsEnum.O3, parameters) // limitTokens doesn't apply for o1-preview
9292
}
9393

9494
hasPrefix (prompt: string): string {

src/modules/llms/utils/llmsData.ts

Lines changed: 27 additions & 26 deletions
Original file line number | Diff line number | Diff line change
@@ -123,20 +123,21 @@ export const llmData: LLMData = {
123123
chargeType: 'TOKEN',
124124
stream: true
125125
},
126-
// 'gpt-4': {
127-
// provider: 'openai',
128-
// name: 'gpt-4',
129-
// fullName: 'GPT-4',
130-
// botName: 'OpenAIBot',
131-
// version: 'gpt-4',
132-
// commands: ['gpt4'],
133-
// apiSpec: 'https://openai.com/index/gpt-4/',
134-
// inputPrice: 0.03,
135-
// outputPrice: 0.06,
136-
// maxContextTokens: 8192,
137-
// chargeType: 'TOKEN',
138-
// stream: true
139-
// },
126+
'gpt-4-1': {
127+
provider: 'openai',
128+
name: 'gpt-4.1',
129+
fullName: 'GPT-4.1',
130+
botName: 'OpenAIBot',
131+
version: 'gpt-4.1-2025-04-14',
132+
commands: ['gpt41', 'ask41'],
133+
prefix: ['a41. '],
134+
apiSpec: 'https://platform.openai.com/docs/models/gpt-4.1',
135+
inputPrice: 0.002,
136+
outputPrice: 0.008,
137+
maxContextTokens: 32768,
138+
chargeType: 'TOKEN',
139+
stream: true
140+
},
140141
'gpt-35-turbo': {
141142
provider: 'openai',
142143
name: 'gpt-35-turbo',
@@ -145,9 +146,9 @@ export const llmData: LLMData = {
145146
version: 'gpt-3.5-turbo',
146147
commands: ['ask35'],
147148
apiSpec: 'https://platform.openai.com/docs/models/gpt-3-5-turbo',
148-
inputPrice: 0.003,
149-
outputPrice: 0.006,
150-
maxContextTokens: 4000,
149+
inputPrice: 0.0005,
150+
outputPrice: 0.0015,
151+
maxContextTokens: 4096,
151152
chargeType: 'TOKEN',
152153
stream: true
153154
},
@@ -166,17 +167,17 @@ export const llmData: LLMData = {
166167
// chargeType: 'TOKEN',
167168
// stream: true
168169
// },
169-
o1: {
170+
o3: {
170171
provider: 'openai',
171-
name: 'o1',
172-
fullName: 'O1 Preview',
172+
name: 'o3',
173+
fullName: 'O3',
173174
botName: 'OpenAIBot',
174-
version: 'o1-preview',
175-
commands: ['o1', 'ask1'],
176-
prefix: ['o1. '],
175+
version: 'o3-2025-04-16',
176+
commands: ['o3'],
177+
prefix: ['o3. '],
177178
apiSpec: 'https://platform.openai.com/docs/models/o1',
178-
inputPrice: 0.015,
179-
outputPrice: 0.06,
179+
inputPrice: 0.01,
180+
outputPrice: 0.04,
180181
maxContextTokens: 200000,
181182
chargeType: 'TOKEN',
182183
stream: false
@@ -294,7 +295,7 @@ export const llmData: LLMData = {
294295
temperature: config.openAi.dalle.completions.temperature,
295296
max_completion_tokens: +config.openAi.chatGpt.maxTokens
296297
},
297-
modelOverrides: { o1: { temperature: 1 }, 'o3-mini': { temperature: undefined } } // uses model name, not model version
298+
modelOverrides: { o3: { temperature: 1 }, 'o3-mini': { temperature: undefined } } // uses model name, not model version
298299
},
299300
claude: {
300301
defaultParameters: {

0 commit comments

Comments (0)