Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -373,6 +373,51 @@ describe("OpenAI Chat -> Responses converters", () => {
expect(assistant2.tool_calls?.[0].id).toBe("call_c");
});

it("truncates long tool_call_ids to 40 characters for Azure compatibility", () => {
  // 54-character id — well past Azure's 40-char tool_call_id limit.
  const oversizedId =
    "call_1234567890abcdefghij1234567890abcdefghij1234567890";
  const req = {
    model: "gpt-4o-mini",
    input: [
      { role: "user", content: "Hello" },
      {
        type: "function_call",
        call_id: oversizedId,
        name: "my_func",
        arguments: "{}",
      },
      { type: "function_call_output", call_id: oversizedId, output: "result" },
    ],
  } as unknown as ResponsesRequestBody;

  const converted = toChatCompletions(req);
  const assistantMsg = converted.messages?.[1] as any;
  const toolMsg = converted.messages?.[2] as any;

  // Assistant tool_call id must fit within the 40-char limit…
  expect(assistantMsg.tool_calls?.[0].id.length).toBeLessThanOrEqual(40);
  // …and so must the matching tool response's tool_call_id.
  expect(toolMsg.tool_call_id.length).toBeLessThanOrEqual(40);
  // Truncation is deterministic, so both sides carry the identical id.
  expect(assistantMsg.tool_calls?.[0].id).toBe(toolMsg.tool_call_id);
});

it("preserves short tool_call_ids unchanged", () => {
  // Under the 40-char Azure limit, so no truncation should occur.
  const compactId = "call_abc123";
  const req = {
    model: "gpt-4o-mini",
    input: [
      { role: "user", content: "Hello" },
      { type: "function_call", call_id: compactId, name: "my_func", arguments: "{}" },
      { type: "function_call_output", call_id: compactId, output: "result" },
    ],
  } as unknown as ResponsesRequestBody;

  const converted = toChatCompletions(req);

  // Both the assistant tool_call and the tool response keep the original id.
  const assistantMsg = converted.messages?.[1] as any;
  expect(assistantMsg.tool_calls?.[0].id).toBe(compactId);
  const toolMsg = converted.messages?.[2] as any;
  expect(toolMsg.tool_call_id).toBe(compactId);
});

it("maps Responses tools (flattened) to Chat tools (nested)", () => {
const req = {
model: "gpt-4o-mini",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,33 @@ import {
} from "@helicone-package/prompts/types";
import { ResponsesToolDefinition } from "../../../types/responses";

/**
 * Azure enforces a 40 character limit on `tool_call_id`.
 */
const AZURE_TOOL_CALL_ID_LIMIT = 40;

/**
 * Deterministically shortens a tool_call_id to fit Azure's 40-char limit.
 *
 * - IDs of 40 characters or fewer are returned unchanged.
 * - Longer IDs become `<prefix>_<hash>`, where the hash is computed over the
 *   full original ID. The same input always yields the same output, so a
 *   tool_call and its corresponding tool response stay paired after
 *   truncation.
 *
 * @param id - The original tool_call_id.
 * @returns An id no longer than {@link AZURE_TOOL_CALL_ID_LIMIT} characters.
 */
function truncateToolCallId(id: string): string {
  if (id.length <= AZURE_TOOL_CALL_ID_LIMIT) {
    return id;
  }
  // 32-bit rolling hash: hash = hash * 31 + charCode, wrapped per iteration.
  // Math.imul performs the 32-bit multiply directly (equivalent to the
  // classic `(hash << 5) - hash` trick, but explicit about intent).
  let hash = 0;
  for (let i = 0; i < id.length; i++) {
    hash = (Math.imul(hash, 31) + id.charCodeAt(i)) | 0;
  }
  // Interpret the hash as unsigned rather than using Math.abs: abs() folds
  // +h and -h onto the same value, halving the effective hash space and
  // doubling the chance that two distinct long IDs collide.
  const hashStr = (hash >>> 0).toString(36);
  // hashStr is at most 7 chars (2^32 - 1 in base 36), so prefixLength >= 32.
  const prefixLength = AZURE_TOOL_CALL_ID_LIMIT - hashStr.length - 1;
  return `${id.substring(0, prefixLength)}_${hashStr}`;
}

function mapRole(role: string): "system" | "user" | "assistant" | "tool" | "function" {
if (role === "developer") return "system";
if (role === "system" || role === "user" || role === "assistant") return role;
Expand Down Expand Up @@ -88,7 +115,7 @@ function convertInputToMessages(input: ResponsesRequestBody["input"]) {
>(input, i, "function_call");

const toolCalls = functionCalls.map((fc, idx) => ({
id: fc.id || fc.call_id || `call_${i + idx}`,
id: truncateToolCallId(fc.id || fc.call_id || `call_${i + idx}`),
type: "function" as const,
function: {
name: fc.name,
Expand All @@ -110,7 +137,7 @@ function convertInputToMessages(input: ResponsesRequestBody["input"]) {
const fco = item;
messages.push({
role: "tool",
tool_call_id: fco.call_id,
tool_call_id: truncateToolCallId(fco.call_id),
content: fco.output ?? "",
});
continue;
Expand Down
Loading