Skip to content

Commit cee99df

Browse files
authored
add new models (#5556)
1 parent 79aaeea commit cee99df

File tree

3 files changed

+162
-8
lines changed

3 files changed

+162
-8
lines changed

packages/__tests__/cost/__snapshots__/registrySnapshots.test.ts.snap

Lines changed: 62 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5129,6 +5129,21 @@ exports[`Registry Snapshots endpoint configurations snapshot 1`] = `
51295129
},
51305130
},
51315131
"openai/gpt-5.2": {
5132+
"gpt-5.2-chat-latest:helicone": {
5133+
"context": 128000,
5134+
"crossRegion": false,
5135+
"maxTokens": 16384,
5136+
"modelId": "pa/gpt-5.2-chat-latest",
5137+
"parameters": [
5138+
"max_completion_tokens",
5139+
"stop",
5140+
],
5141+
"provider": "helicone",
5142+
"ptbEnabled": true,
5143+
"regions": [
5144+
"*",
5145+
],
5146+
},
51325147
"gpt-5.2-chat-latest:openai": {
51335148
"context": 128000,
51345149
"crossRegion": false,
@@ -5177,6 +5192,21 @@ exports[`Registry Snapshots endpoint configurations snapshot 1`] = `
51775192
"*",
51785193
],
51795194
},
5195+
"gpt-5.2-pro:helicone": {
5196+
"context": 128000,
5197+
"crossRegion": false,
5198+
"maxTokens": 32768,
5199+
"modelId": "pa/gpt-5.2-pro",
5200+
"parameters": [
5201+
"max_completion_tokens",
5202+
"stop",
5203+
],
5204+
"provider": "helicone",
5205+
"ptbEnabled": true,
5206+
"regions": [
5207+
"*",
5208+
],
5209+
},
51805210
"gpt-5.2-pro:openai": {
51815211
"context": 400000,
51825212
"crossRegion": false,
@@ -5248,6 +5278,21 @@ exports[`Registry Snapshots endpoint configurations snapshot 1`] = `
52485278
"*",
52495279
],
52505280
},
5281+
"gpt-5.2:helicone": {
5282+
"context": 128000,
5283+
"crossRegion": false,
5284+
"maxTokens": 32768,
5285+
"modelId": "pa/gpt-5.2",
5286+
"parameters": [
5287+
"max_completion_tokens",
5288+
"stop",
5289+
],
5290+
"provider": "helicone",
5291+
"ptbEnabled": true,
5292+
"regions": [
5293+
"*",
5294+
],
5295+
},
52515296
"gpt-5.2:openai": {
52525297
"context": 400000,
52535298
"crossRegion": false,
@@ -6889,6 +6934,9 @@ exports[`Registry Snapshots model coverage snapshot 1`] = `
68896934
],
68906935
"openai/gpt-5.2": [
68916936
"azure",
6937+
"helicone",
6938+
"helicone",
6939+
"helicone",
68926940
"openai",
68936941
"openai",
68946942
"openai",
@@ -8630,6 +8678,14 @@ exports[`Registry Snapshots pricing snapshot 1`] = `
86308678
"web_search": 0.01,
86318679
},
86328680
],
8681+
"helicone": [
8682+
{
8683+
"input": 0.000021,
8684+
"output": 0.000168,
8685+
"threshold": 0,
8686+
"web_search": 0.01,
8687+
},
8688+
],
86338689
"openai": [
86348690
{
86358691
"cacheMultipliers": {
@@ -9338,20 +9394,23 @@ exports[`Registry Snapshots verify registry state 1`] = `
93389394
"model": "gpt-5.2",
93399395
"providers": [
93409396
"azure",
9397+
"helicone",
93419398
"openai",
93429399
"openrouter",
93439400
],
93449401
},
93459402
{
93469403
"model": "gpt-5.2-chat-latest",
93479404
"providers": [
9405+
"helicone",
93489406
"openai",
93499407
"openrouter",
93509408
],
93519409
},
93529410
{
93539411
"model": "gpt-5.2-pro",
93549412
"providers": [
9413+
"helicone",
93559414
"openai",
93569415
"openrouter",
93579416
],
@@ -9755,7 +9814,7 @@ exports[`Registry Snapshots verify registry state 1`] = `
97559814
"provider": "groq",
97569815
},
97579816
{
9758-
"modelCount": 43,
9817+
"modelCount": 46,
97599818
"provider": "helicone",
97609819
},
97619820
{
@@ -9904,8 +9963,8 @@ exports[`Registry Snapshots verify registry state 1`] = `
99049963
"claude-3.5-haiku:anthropic:*",
99059964
],
99069965
"totalArchivedConfigs": 0,
9907-
"totalEndpoints": 297,
9908-
"totalModelProviderConfigs": 297,
9966+
"totalEndpoints": 300,
9967+
"totalModelProviderConfigs": 300,
99099968
"totalModelsWithPtb": 102,
99109969
"totalProviders": 21,
99119970
}

packages/cost/models/authors/openai/gpt-5.2/endpoints.ts

Lines changed: 98 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,7 @@ export const endpoints = {
8080
"top_logprobs",
8181
"verbosity",
8282
],
83-
unsupportedParameters: [
84-
],
83+
unsupportedParameters: [],
8584
ptbEnabled: true,
8685
endpointConfigs: {
8786
"*": {},
@@ -161,8 +160,7 @@ export const endpoints = {
161160
"top_logprobs",
162161
"verbosity",
163162
],
164-
unsupportedParameters: [
165-
],
163+
unsupportedParameters: [],
166164
ptbEnabled: true,
167165
endpointConfigs: {
168166
"*": {},
@@ -246,7 +244,103 @@ export const endpoints = {
246244
"top_logprobs",
247245
"verbosity",
248246
],
247+
unsupportedParameters: [],
248+
ptbEnabled: true,
249+
endpointConfigs: {
250+
"*": {},
251+
},
252+
},
253+
"gpt-5.2-chat-latest:helicone": {
254+
provider: "helicone",
255+
author: "openai",
256+
providerModelId: "pa/gpt-5.2-chat-latest",
257+
pricing: [
258+
{
259+
threshold: 0,
260+
input: 0.00000175, // $1.75 per 1M tokens
261+
output: 0.000014, // $14.00 per 1M tokens
262+
web_search: 0.01, // $10 per 1000 searches (1:1 USD; 10/1K)
263+
cacheMultipliers: {
264+
cachedInput: 0.1, // $0.175 per 1M tokens
265+
},
266+
},
267+
],
268+
contextLength: 128000,
269+
maxCompletionTokens: 16384,
270+
supportedParameters: ["max_completion_tokens", "stop"],
249271
unsupportedParameters: [
272+
"temperature",
273+
"top_p",
274+
"presence_penalty",
275+
"frequency_penalty",
276+
"logprobs",
277+
"top_logprobs",
278+
"logit_bias",
279+
"max_tokens",
280+
"n",
281+
],
282+
ptbEnabled: true,
283+
endpointConfigs: {
284+
"*": {},
285+
},
286+
},
287+
"gpt-5.2:helicone": {
288+
provider: "helicone",
289+
author: "openai",
290+
providerModelId: "pa/gpt-5.2",
291+
pricing: [
292+
{
293+
threshold: 0,
294+
input: 0.00000175, // $1.75 per 1M tokens
295+
output: 0.000014, // $14.00 per 1M tokens
296+
web_search: 0.01, // $10 per 1000 searches (1:1 USD; 10/1K)
297+
cacheMultipliers: {
298+
cachedInput: 0.1, // $0.175 per 1M tokens
299+
},
300+
},
301+
],
302+
contextLength: 128000,
303+
maxCompletionTokens: 32768,
304+
supportedParameters: ["max_completion_tokens", "stop"],
305+
unsupportedParameters: [
306+
"temperature",
307+
"top_p",
308+
"presence_penalty",
309+
"frequency_penalty",
310+
"logprobs",
311+
"top_logprobs",
312+
"logit_bias",
313+
"max_tokens",
314+
],
315+
ptbEnabled: true,
316+
endpointConfigs: {
317+
"*": {},
318+
},
319+
},
320+
"gpt-5.2-pro:helicone": {
321+
provider: "helicone",
322+
author: "openai",
323+
providerModelId: "pa/gpt-5.2-pro",
324+
pricing: [
325+
{
326+
threshold: 0,
327+
input: 0.000021_00, // $21.00 per 1M tokens
328+
output: 0.000168_00, // $168.00 per 1M tokens
329+
web_search: 0.01, // $10 per 1000 searches (1:1 USD; 10/1K)
330+
},
331+
],
332+
contextLength: 128000,
333+
maxCompletionTokens: 32768,
334+
supportedParameters: ["max_completion_tokens", "stop"],
335+
unsupportedParameters: [
336+
"temperature",
337+
"top_p",
338+
"presence_penalty",
339+
"frequency_penalty",
340+
"logprobs",
341+
"top_logprobs",
342+
"logit_bias",
343+
"max_tokens",
250344
],
251345
ptbEnabled: true,
252346
endpointConfigs: {

packages/cost/models/providers/helicone.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,8 @@ export class HeliconeProvider extends BaseProvider {
2727
const isResponsesEndpoint =
2828
requestParams.bodyMapping === "RESPONSES" ||
2929
endpoint.providerModelId.includes("gpt-5-pro") ||
30-
endpoint.providerModelId.includes("gpt-5-codex");
30+
endpoint.providerModelId.includes("gpt-5-codex") ||
31+
endpoint.providerModelId.includes("gpt-5.2-pro");
3132

3233
const path = isResponsesEndpoint ? "/responses" : "/chat/completions";
3334
return `${this.baseUrl}/openai/v1${path}`;

0 commit comments

Comments (0)