Skip to content

Commit 35f54eb

Browse files
committed
openai models moving
1 parent 1277636 commit 35f54eb

File tree

12 files changed

+757
-804
lines changed

12 files changed

+757
-804
lines changed

packages/__tests__/cost/__snapshots__/registrySnapshots.test.ts.snap

Lines changed: 276 additions & 225 deletions
Large diffs are not rendered by default.

packages/cost/models/authors/openai/gpt-4.1/endpoints.ts

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -320,6 +320,85 @@ export const endpoints = {
320320
"*": {},
321321
},
322322
},
323+
"gpt-4.1:helicone": {
324+
provider: "helicone",
325+
author: "openai",
326+
providerModelId: "pa/gt-4.1",
327+
pricing: [
328+
{
329+
threshold: 0,
330+
input: 0.000002, // $2.00 per 1M tokens
331+
output: 0.000008, // $8.00 per 1M tokens
332+
},
333+
],
334+
contextLength: 128000,
335+
maxCompletionTokens: 16384,
336+
supportedParameters: [
337+
"max_tokens",
338+
"temperature",
339+
"top_p",
340+
"stop",
341+
"frequency_penalty",
342+
"presence_penalty",
343+
],
344+
ptbEnabled: true,
345+
requireExplicitRouting: true,
346+
endpointConfigs: {
347+
"*": {},
348+
},
349+
},
350+
"gpt-4.1-nano:helicone": {
351+
provider: "helicone",
352+
author: "openai",
353+
providerModelId: "pa/gt-4.1-n",
354+
pricing: [
355+
{
356+
threshold: 0,
357+
input: 0.0000001, // $0.10 per 1M tokens
358+
output: 0.0000004, // $0.40 per 1M tokens
359+
},
360+
],
361+
contextLength: 128000,
362+
maxCompletionTokens: 8192,
363+
supportedParameters: [
364+
"max_tokens",
365+
"temperature",
366+
"top_p",
367+
"stop",
368+
],
369+
ptbEnabled: true,
370+
requireExplicitRouting: true,
371+
endpointConfigs: {
372+
"*": {},
373+
},
374+
},
375+
"gpt-4.1-mini:helicone": {
376+
provider: "helicone",
377+
author: "openai",
378+
providerModelId: "pa/gt-4.1-m",
379+
pricing: [
380+
{
381+
threshold: 0,
382+
input: 0.0000004, // $0.40 per 1M tokens
383+
output: 0.0000016, // $1.60 per 1M tokens
384+
},
385+
],
386+
contextLength: 128000,
387+
maxCompletionTokens: 16384,
388+
supportedParameters: [
389+
"max_tokens",
390+
"temperature",
391+
"top_p",
392+
"stop",
393+
"frequency_penalty",
394+
"presence_penalty",
395+
],
396+
ptbEnabled: true,
397+
requireExplicitRouting: true,
398+
endpointConfigs: {
399+
"*": {},
400+
},
401+
},
323402
} satisfies Partial<
324403
Record<`${GPT41ModelName}:${ModelProviderName}`, ModelProviderConfig>
325404
>;

packages/cost/models/authors/openai/gpt-4o/endpoints.ts

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -283,6 +283,60 @@ export const endpoints = {
283283
"*": {},
284284
},
285285
},
286+
"gpt-4o:helicone": {
287+
provider: "helicone",
288+
author: "openai",
289+
providerModelId: "pa/gt-4o",
290+
pricing: [
291+
{
292+
threshold: 0,
293+
input: 0.0000025, // $2.50 per 1M tokens
294+
output: 0.00001, // $10.00 per 1M tokens
295+
},
296+
],
297+
contextLength: 128000,
298+
maxCompletionTokens: 16384,
299+
supportedParameters: [
300+
"max_tokens",
301+
"temperature",
302+
"top_p",
303+
"stop",
304+
"frequency_penalty",
305+
"presence_penalty",
306+
],
307+
ptbEnabled: true,
308+
requireExplicitRouting: true,
309+
endpointConfigs: {
310+
"*": {},
311+
},
312+
},
313+
"gpt-4o-mini:helicone": {
314+
provider: "helicone",
315+
author: "openai",
316+
providerModelId: "pa/gt-4o-m",
317+
pricing: [
318+
{
319+
threshold: 0,
320+
input: 0.00000015, // $0.15 per 1M tokens
321+
output: 0.0000006, // $0.60 per 1M tokens
322+
},
323+
],
324+
contextLength: 128000,
325+
maxCompletionTokens: 16384,
326+
supportedParameters: [
327+
"max_tokens",
328+
"temperature",
329+
"top_p",
330+
"stop",
331+
"frequency_penalty",
332+
"presence_penalty",
333+
],
334+
ptbEnabled: true,
335+
requireExplicitRouting: true,
336+
endpointConfigs: {
337+
"*": {},
338+
},
339+
},
286340
} satisfies Partial<
287341
Record<`${GPT4oModelName}:${ModelProviderName}`, ModelProviderConfig>
288342
>;

packages/cost/models/authors/openai/gpt-5/endpoints.ts

Lines changed: 158 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -283,6 +283,164 @@ export const endpoints = {
283283
"*": {},
284284
},
285285
},
286+
"gpt-5:helicone": {
287+
provider: "helicone",
288+
author: "openai",
289+
providerModelId: "pa/gpt-5",
290+
pricing: [
291+
{
292+
threshold: 0,
293+
input: 0.00000125, // $1.25 per 1M tokens
294+
output: 0.00001, // $10.00 per 1M tokens
295+
},
296+
],
297+
contextLength: 128000,
298+
maxCompletionTokens: 32768,
299+
supportedParameters: [
300+
"max_tokens",
301+
"temperature",
302+
"top_p",
303+
"stop",
304+
"frequency_penalty",
305+
"presence_penalty",
306+
],
307+
ptbEnabled: true,
308+
requireExplicitRouting: true,
309+
endpointConfigs: {
310+
"*": {},
311+
},
312+
},
313+
"gpt-5-mini:helicone": {
314+
provider: "helicone",
315+
author: "openai",
316+
providerModelId: "pa/gpt-5-mini",
317+
pricing: [
318+
{
319+
threshold: 0,
320+
input: 0.00000025, // $0.25 per 1M tokens
321+
output: 0.000002, // $2.00 per 1M tokens
322+
},
323+
],
324+
contextLength: 128000,
325+
maxCompletionTokens: 16384,
326+
supportedParameters: [
327+
"max_tokens",
328+
"temperature",
329+
"top_p",
330+
"stop",
331+
"frequency_penalty",
332+
"presence_penalty",
333+
],
334+
ptbEnabled: true,
335+
requireExplicitRouting: true,
336+
endpointConfigs: {
337+
"*": {},
338+
},
339+
},
340+
"gpt-5-nano:helicone": {
341+
provider: "helicone",
342+
author: "openai",
343+
providerModelId: "pa/gpt-5-nano",
344+
pricing: [
345+
{
346+
threshold: 0,
347+
input: 0.00000005, // $0.05 per 1M tokens
348+
output: 0.0000004, // $0.40 per 1M tokens
349+
},
350+
],
351+
contextLength: 128000,
352+
maxCompletionTokens: 8192,
353+
supportedParameters: [
354+
"max_tokens",
355+
"temperature",
356+
"top_p",
357+
"stop",
358+
],
359+
ptbEnabled: true,
360+
requireExplicitRouting: true,
361+
endpointConfigs: {
362+
"*": {},
363+
},
364+
},
365+
"gpt-5-chat-latest:helicone": {
366+
provider: "helicone",
367+
author: "openai",
368+
providerModelId: "pa/gpt-5-chat-latest",
369+
pricing: [
370+
{
371+
threshold: 0,
372+
input: 0.00000125, // $1.25 per 1M tokens
373+
output: 0.00001, // $10.00 per 1M tokens
374+
},
375+
],
376+
contextLength: 128000,
377+
maxCompletionTokens: 32768,
378+
supportedParameters: [
379+
"max_tokens",
380+
"temperature",
381+
"top_p",
382+
"stop",
383+
"frequency_penalty",
384+
"presence_penalty",
385+
],
386+
ptbEnabled: true,
387+
requireExplicitRouting: true,
388+
endpointConfigs: {
389+
"*": {},
390+
},
391+
},
392+
"gpt-5-pro:helicone": {
393+
provider: "helicone",
394+
author: "openai",
395+
providerModelId: "pa/gpt-5-pro",
396+
pricing: [
397+
{
398+
threshold: 0,
399+
input: 0.000015, // $15.00 per 1M tokens
400+
output: 0.00012, // $120.00 per 1M tokens
401+
},
402+
],
403+
contextLength: 128000,
404+
maxCompletionTokens: 32768,
405+
supportedParameters: [
406+
"max_tokens",
407+
"temperature",
408+
"top_p",
409+
"stop",
410+
"frequency_penalty",
411+
"presence_penalty",
412+
],
413+
ptbEnabled: true,
414+
requireExplicitRouting: true,
415+
endpointConfigs: {
416+
"*": {},
417+
},
418+
},
419+
"gpt-5-codex:helicone": {
420+
provider: "helicone",
421+
author: "openai",
422+
providerModelId: "pa/gpt-5-codex",
423+
pricing: [
424+
{
425+
threshold: 0,
426+
input: 0.00000125, // $1.25 per 1M tokens
427+
output: 0.00001, // $10.00 per 1M tokens
428+
},
429+
],
430+
contextLength: 128000,
431+
maxCompletionTokens: 32768,
432+
supportedParameters: [
433+
"max_tokens",
434+
"temperature",
435+
"top_p",
436+
"stop",
437+
],
438+
ptbEnabled: true,
439+
requireExplicitRouting: true,
440+
endpointConfigs: {
441+
"*": {},
442+
},
443+
},
286444
} satisfies Partial<
287445
Record<`${GPT5ModelName}:${ModelProviderName}`, ModelProviderConfig>
288446
>;

packages/cost/models/authors/openai/gpt-5/models.ts

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,26 @@ export const models = {
4545
modality: { inputs: ["text", "image"], outputs: ["text"] },
4646
tokenizer: "GPT",
4747
},
48+
"gpt-5-pro": {
49+
name: "OpenAI: GPT-5 Pro",
50+
author: "openai",
51+
description: "Most capable GPT-5 model with extended thinking capabilities",
52+
contextLength: 128000,
53+
maxOutputTokens: 32768,
54+
created: "2025-01-01T00:00:00.000Z",
55+
modality: { inputs: ["text"], outputs: ["text"] },
56+
tokenizer: "GPT",
57+
},
58+
"gpt-5-codex": {
59+
name: "OpenAI: GPT-5 Codex",
60+
author: "openai",
61+
description: "Specialized model for code generation and analysis",
62+
contextLength: 128000,
63+
maxOutputTokens: 32768,
64+
created: "2025-01-01T00:00:00.000Z",
65+
modality: { inputs: ["text"], outputs: ["text"] },
66+
tokenizer: "GPT",
67+
},
4868
} satisfies Record<string, ModelConfig>;
4969

5070
export type GPT5ModelName = keyof typeof models;

0 commit comments

Comments
 (0)