Skip to content

Commit 6916e19

Browse files
authored
Fix Responses API parallel tool calls and add store parameter (#5571)
1 parent 33d945e commit 6916e19

File tree

3 files changed

+113
-13
lines changed

3 files changed

+113
-13
lines changed

packages/__tests__/llm-mapper/openai-chat-to-responses-converters.test.ts

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -297,6 +297,82 @@ describe("OpenAI Chat -> Responses converters", () => {
297297
expect(tool.content).toBe("4");
298298
});
299299

300+
it("groups parallel function_call items into a single assistant message with multiple tool_calls", () => {
301+
const req: ResponsesRequestBody = {
302+
model: "gpt-4o-mini",
303+
input: [
304+
{ type: "message", role: "user", content: "Search for healthcare and finance projects" },
305+
{ type: "function_call", call_id: "fc_call1", name: "search_projects", arguments: "{\"queries\": [\"healthcare\"]}" },
306+
{ type: "function_call", call_id: "fc_call2", name: "search_projects", arguments: "{\"queries\": [\"finance\"]}" },
307+
{ type: "function_call_output", call_id: "fc_call1", output: "{\"projects\": [{\"name\": \"Healthcare Strategy\"}]}" },
308+
{ type: "function_call_output", call_id: "fc_call2", output: "{\"projects\": [{\"name\": \"Finance Overview\"}]}" },
309+
],
310+
};
311+
const oai = toChatCompletions(req);
312+
313+
// Should have: user message, ONE assistant message with 2 tool_calls, 2 tool messages
314+
expect(oai.messages?.length).toBe(4);
315+
316+
// First message: user
317+
expect(oai.messages?.[0]).toMatchObject({ role: "user", content: "Search for healthcare and finance projects" });
318+
319+
// Second message: assistant with BOTH tool_calls in a single message
320+
const assistant = oai.messages?.[1] as any;
321+
expect(assistant.role).toBe("assistant");
322+
expect(assistant.tool_calls?.length).toBe(2);
323+
expect(assistant.tool_calls?.[0]).toMatchObject({
324+
id: "fc_call1",
325+
type: "function",
326+
function: { name: "search_projects", arguments: "{\"queries\": [\"healthcare\"]}" }
327+
});
328+
expect(assistant.tool_calls?.[1]).toMatchObject({
329+
id: "fc_call2",
330+
type: "function",
331+
function: { name: "search_projects", arguments: "{\"queries\": [\"finance\"]}" }
332+
});
333+
334+
// Third and fourth messages: tool responses
335+
const tool1 = oai.messages?.[2] as any;
336+
expect(tool1.role).toBe("tool");
337+
expect(tool1.tool_call_id).toBe("fc_call1");
338+
339+
const tool2 = oai.messages?.[3] as any;
340+
expect(tool2.role).toBe("tool");
341+
expect(tool2.tool_call_id).toBe("fc_call2");
342+
});
343+
344+
it("handles multiple separate tool call rounds correctly", () => {
345+
const req: ResponsesRequestBody = {
346+
model: "gpt-4o-mini",
347+
input: [
348+
{ type: "message", role: "user", content: "Do two things" },
349+
// First round: 2 parallel calls
350+
{ type: "function_call", call_id: "call_a", name: "func_a", arguments: "{}" },
351+
{ type: "function_call", call_id: "call_b", name: "func_b", arguments: "{}" },
352+
{ type: "function_call_output", call_id: "call_a", output: "result_a" },
353+
{ type: "function_call_output", call_id: "call_b", output: "result_b" },
354+
// Second round: 1 call
355+
{ type: "function_call", call_id: "call_c", name: "func_c", arguments: "{}" },
356+
{ type: "function_call_output", call_id: "call_c", output: "result_c" },
357+
],
358+
};
359+
const oai = toChatCompletions(req);
360+
361+
// Should have: user, assistant (2 calls), tool, tool, assistant (1 call), tool
362+
expect(oai.messages?.length).toBe(6);
363+
364+
// First assistant message should have 2 tool_calls
365+
const assistant1 = oai.messages?.[1] as any;
366+
expect(assistant1.role).toBe("assistant");
367+
expect(assistant1.tool_calls?.length).toBe(2);
368+
369+
// Second assistant message should have 1 tool_call
370+
const assistant2 = oai.messages?.[4] as any;
371+
expect(assistant2.role).toBe("assistant");
372+
expect(assistant2.tool_calls?.length).toBe(1);
373+
expect(assistant2.tool_calls?.[0].id).toBe("call_c");
374+
});
375+
300376
it("maps Responses tools (flattened) to Chat tools (nested)", () => {
301377
const req = {
302378
model: "gpt-4o-mini",

packages/llm-mapper/transform/providers/responses/request/toChatCompletions.ts

Lines changed: 36 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -52,9 +52,26 @@ function convertContentParts(
5252
});
5353
}
5454

55+
/**
56+
* Collects consecutive items of a specific type from the input array.
57+
* Returns the collected items and the index after the last collected item.
58+
*/
59+
function collectConsecutiveByType<T extends ResponsesInputItem>(
60+
input: ResponsesInputItem[],
61+
startIndex: number,
62+
type: string
63+
): { items: T[]; endIndex: number } {
64+
const items: T[] = [];
65+
let index = startIndex;
66+
while (index < input.length && input[index].type === type) {
67+
items.push(input[index] as T);
68+
index++;
69+
}
70+
return { items, endIndex: index };
71+
}
72+
5573
function convertInputToMessages(input: ResponsesRequestBody["input"]) {
5674
const messages: NonNullable<HeliconeChatCreateParams["messages"]> = [];
57-
// emit an assistant message for each function_call item to simplify typing
5875
if (typeof input === "string") {
5976
messages.push({ role: "user", content: input });
6077
return messages;
@@ -63,22 +80,29 @@ function convertInputToMessages(input: ResponsesRequestBody["input"]) {
6380
for (let i = 0; i < input.length; i++) {
6481
const item: ResponsesInputItem = input[i];
6582

83+
// Handle function_call: group consecutive function_call items into a single assistant message
84+
// with multiple tool_calls. This is required by Chat Completions format for parallel tool calls.
6685
if (item.type === "function_call") {
67-
const fc = item;
86+
const { items: functionCalls, endIndex } = collectConsecutiveByType<
87+
Extract<ResponsesInputItem, { type: "function_call" }>
88+
>(input, i, "function_call");
89+
90+
const toolCalls = functionCalls.map((fc, idx) => ({
91+
id: fc.id || fc.call_id || `call_${i + idx}`,
92+
type: "function" as const,
93+
function: {
94+
name: fc.name,
95+
arguments: fc.arguments ?? "{}",
96+
},
97+
}));
98+
6899
messages.push({
69100
role: "assistant",
70101
content: "",
71-
tool_calls: [
72-
{
73-
id: fc.id || fc.call_id || `call_${i}`,
74-
type: "function",
75-
function: {
76-
name: fc.name,
77-
arguments: fc.arguments ?? "{}",
78-
},
79-
},
80-
],
102+
tool_calls: toolCalls,
81103
});
104+
105+
i = endIndex - 1;
82106
continue;
83107
}
84108

worker/src/lib/ai-gateway/validators/responses-types.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -686,7 +686,7 @@ const CreateResponse = CreateModelResponseProperties.merge(ResponseProperties)
686686
input: InputParam,
687687
include: z.union([z.array(IncludeEnum), z.null()]),
688688
parallel_tool_calls: z.union([z.boolean(), z.null()]),
689-
// store was removed
689+
store: z.boolean().optional(),
690690
instructions: z.union([z.string(), z.null()]),
691691
stream: z.union([z.boolean(), z.null()]),
692692
stream_options: ResponseStreamOptions,

0 commit comments

Comments (0)