Skip to content

Commit 892fa0e

Browse files
fix: handle Responses API text.format parameter correctly
The Responses API uses `text.format` for structured output configuration, not `response_format` (which is used in the Chat Completions API).

This fix:
- Removes `response_format` from `ResponsesRequestBody` and `OpenAIResponseRequest`
- Adds a proper `text.format` type with `json_schema` support
- Updates `toChatCompletions` to convert `text.format` to `response_format`
- Adds tests for the `json_schema`, `json_object`, and `text` format types

Fixes the error: "Unsupported parameter: 'response_format'. In the Responses API, this parameter has moved to 'text.format'."

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent cee99df commit 892fa0e

File tree

4 files changed

+157
-3
lines changed

4 files changed

+157
-3
lines changed

packages/__tests__/llm-mapper/openai-chat-to-responses-converters.test.ts

Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -351,6 +351,122 @@ describe("OpenAI Chat -> Responses converters", () => {
351351
expect(textPart).toMatchObject({ type: "text", text: "Hi there" });
352352
}
353353
});
354+
355+
it("maps text.format json_schema to response_format", () => {
356+
const req: ResponsesRequestBody = {
357+
model: "gpt-4o-mini",
358+
input: "Generate a JSON object with name and age",
359+
text: {
360+
format: {
361+
type: "json_schema",
362+
json_schema: {
363+
name: "person",
364+
description: "A person object",
365+
schema: {
366+
type: "object",
367+
properties: {
368+
name: { type: "string" },
369+
age: { type: "number" },
370+
},
371+
required: ["name", "age"],
372+
},
373+
strict: true,
374+
},
375+
},
376+
},
377+
};
378+
const oai = toChatCompletions(req);
379+
expect(oai.response_format).toBeDefined();
380+
expect(oai.response_format?.type).toBe("json_schema");
381+
expect((oai.response_format as any)?.json_schema).toEqual({
382+
name: "person",
383+
description: "A person object",
384+
schema: {
385+
type: "object",
386+
properties: {
387+
name: { type: "string" },
388+
age: { type: "number" },
389+
},
390+
required: ["name", "age"],
391+
},
392+
strict: true,
393+
});
394+
});
395+
396+
it("maps text.format json_object to response_format", () => {
397+
const req: ResponsesRequestBody = {
398+
model: "gpt-4o-mini",
399+
input: "Generate a JSON response",
400+
text: {
401+
format: {
402+
type: "json_object",
403+
},
404+
},
405+
};
406+
const oai = toChatCompletions(req);
407+
expect(oai.response_format).toBeDefined();
408+
expect(oai.response_format?.type).toBe("json_object");
409+
expect((oai.response_format as any)?.json_schema).toBeUndefined();
410+
});
411+
412+
it("maps text.format text type to response_format", () => {
413+
const req: ResponsesRequestBody = {
414+
model: "gpt-4o-mini",
415+
input: "Hello",
416+
text: {
417+
format: {
418+
type: "text",
419+
},
420+
},
421+
};
422+
const oai = toChatCompletions(req);
423+
expect(oai.response_format).toBeDefined();
424+
expect(oai.response_format?.type).toBe("text");
425+
});
426+
427+
it("does not set response_format when text.format is not provided", () => {
428+
const req: ResponsesRequestBody = {
429+
model: "gpt-4o-mini",
430+
input: "Hello",
431+
text: {
432+
verbosity: "high",
433+
},
434+
};
435+
const oai = toChatCompletions(req);
436+
expect(oai.response_format).toBeUndefined();
437+
});
438+
439+
it("does not set response_format when text is not provided", () => {
440+
const req: ResponsesRequestBody = {
441+
model: "gpt-4o-mini",
442+
input: "Hello",
443+
};
444+
const oai = toChatCompletions(req);
445+
expect(oai.response_format).toBeUndefined();
446+
});
447+
448+
it("preserves text.verbosity separately from text.format", () => {
449+
const req: ResponsesRequestBody = {
450+
model: "gpt-4o-mini",
451+
input: "Generate JSON",
452+
text: {
453+
format: {
454+
type: "json_schema",
455+
json_schema: {
456+
name: "test",
457+
schema: { type: "object" },
458+
},
459+
},
460+
verbosity: "medium",
461+
},
462+
};
463+
const oai = toChatCompletions(req);
464+
// response_format should be set from text.format
465+
expect(oai.response_format).toBeDefined();
466+
expect(oai.response_format?.type).toBe("json_schema");
467+
// Note: verbosity is not directly mapped to Chat Completions API
468+
// but the response_format should still work correctly
469+
});
354470
});
355471

356472
describe("fromChatCompletions (request mapping)", () => {

packages/llm-mapper/mappers/openai/responses.ts

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,20 @@ interface OpenAIResponseRequest {
4545
reasoning?: {
4646
effort?: "low" | "medium" | "high" | "minimal";
4747
};
48+
/**
49+
* Text output configuration including format for structured output.
50+
* In the Responses API, `text.format` replaces `response_format` from Chat Completions.
51+
*/
4852
text?: {
53+
format?: {
54+
type: "text" | "json_schema" | "json_object";
55+
json_schema?: {
56+
name: string;
57+
description?: string;
58+
schema: Record<string, any>;
59+
strict?: boolean;
60+
};
61+
};
4962
verbosity?: "low" | "medium" | "high";
5063
};
5164
store?: boolean;
@@ -73,7 +86,6 @@ interface OpenAIResponseRequest {
7386
logprobs?: boolean;
7487
top_logprobs?: number;
7588
n?: number;
76-
response_format?: { type: string; json_schema?: any };
7789
seed?: number;
7890
service_tier?: string;
7991
stream_options?: any;

packages/llm-mapper/transform/providers/responses/request/toChatCompletions.ts

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,15 @@ export function toChatCompletions(
234234
logit_bias: body.logit_bias,
235235
logprobs: body.logprobs,
236236
top_logprobs: body.top_logprobs,
237-
response_format: body.response_format,
237+
// Convert Responses API text.format to Chat Completions response_format
238+
response_format: body.text?.format
239+
? {
240+
type: body.text.format.type,
241+
...(body.text.format.json_schema && {
242+
json_schema: body.text.format.json_schema,
243+
}),
244+
}
245+
: undefined,
238246
seed: body.seed,
239247
user: body.user,
240248
service_tier: body.service_tier,

packages/llm-mapper/transform/types/responses.ts

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,20 @@ export interface ResponsesImageGenerationConfig {
120120
image_size: string; // e.g "2K"
121121
}
122122

123+
/**
124+
* Text format configuration for structured output in the Responses API.
125+
* Maps to response_format in Chat Completions API.
126+
*/
127+
export interface ResponsesTextFormat {
128+
type: "text" | "json_schema" | "json_object";
129+
json_schema?: {
130+
name: string;
131+
description?: string;
132+
schema: Record<string, any>;
133+
strict?: boolean;
134+
};
135+
}
136+
123137
export interface ResponsesRequestBody {
124138
model: string;
125139
input: string | ResponsesInputItem[];
@@ -136,7 +150,12 @@ export interface ResponsesRequestBody {
136150
reasoning_options?: {
137151
budget_tokens?: number;
138152
};
153+
/**
154+
* Text output configuration including format for structured output.
155+
* In the Responses API, `text.format` replaces `response_format` from Chat Completions.
156+
*/
139157
text?: {
158+
format?: ResponsesTextFormat;
140159
verbosity?: "low" | "medium" | "high";
141160
};
142161
store?: boolean;
@@ -154,7 +173,6 @@ export interface ResponsesRequestBody {
154173
logprobs?: boolean;
155174
top_logprobs?: number;
156175
n?: number;
157-
response_format?: { type: string; json_schema?: any };
158176
seed?: number;
159177
service_tier?: string;
160178
stream_options?: any;

0 commit comments

Comments (0)