
Commit 8850067

fix: cached tokens in vercel (#424)
* feat: cached tokens
* fix: prettier + logs
* fix: vercel types
* fix: prettier
* fix: posthog caching typo + vercel mapping
* fix: missed one for streaming
* fix: screwed up the package - oops
* left merge conflict in changelog
1 parent 6c2cc93 commit 8850067

3 files changed (+27 −5)


posthog-ai/CHANGELOG.md (+4)
@@ -1,3 +1,7 @@
+# 3.3.1 - 2025-03-13
+
+- fix: fix vercel output mapping and token caching
+
 # 3.3.0 - 2025-03-08

 - feat: add reasoning and cache tokens to openai and anthropic

posthog-ai/package.json (+1 −1)
@@ -1,6 +1,6 @@
 {
   "name": "@posthog/ai",
-  "version": "3.3.0",
+  "version": "3.3.1",
   "description": "PostHog Node.js AI integrations",
   "repository": {
     "type": "git",

posthog-ai/src/vercel/middleware.ts (+22 −4)
@@ -112,6 +112,24 @@ const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
   })
 }

+const mapVercelOutput = (result: any): PostHogInput[] => {
+  let output = {
+    ...(result.text ? { text: result.text } : {}),
+    ...(result.object ? { object: result.object } : {}),
+    ...(result.reasoning ? { reasoning: result.reasoning } : {}),
+    ...(result.response ? { response: result.response } : {}),
+    ...(result.finishReason ? { finishReason: result.finishReason } : {}),
+    ...(result.usage ? { usage: result.usage } : {}),
+    ...(result.warnings ? { warnings: result.warnings } : {}),
+    ...(result.providerMetadata ? { toolCalls: result.providerMetadata } : {}),
+  }
+  // if text and no object or reasoning, return text
+  if (output.text && !output.object && !output.reasoning) {
+    return [{ content: output.text, role: 'assistant' }]
+  }
+  return [{ content: JSON.stringify(output), role: 'assistant' }]
+}
+
 const extractProvider = (model: LanguageModelV1): string => {
   // vercel provider is in the format of provider.endpoint
   const provider = model.provider.toLowerCase()
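
The new mapVercelOutput helper condenses a plain-text completion down to its text and JSON-serializes everything else. A minimal sketch of the two return shapes, using hypothetical result objects rather than real SDK responses:

// Hypothetical inputs, for illustration only.
// A plain-text result (no object or reasoning) collapses to the text itself:
mapVercelOutput({ text: 'Hello!', finishReason: 'stop' })
// => [{ content: 'Hello!', role: 'assistant' }]

// Anything structured is serialized whole, so fields such as usage and
// finishReason survive into the captured event content:
mapVercelOutput({
  object: { city: 'Berlin' },
  usage: { promptTokens: 12, completionTokens: 3 },
})
// => [{ content: '{"object":{"city":"Berlin"},"usage":{"promptTokens":12,"completionTokens":3}}', role: 'assistant' }]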
@@ -138,14 +156,14 @@ export const createInstrumentationMiddleware = (
       options.posthogModelOverride ?? (result.response?.modelId ? result.response.modelId : model.modelId)
     const provider = options.posthogProviderOverride ?? extractProvider(model)
     const baseURL = '' // cannot currently get baseURL from vercel
-    let content = result.text || JSON.stringify(result)
+    let content = mapVercelOutput(result)
     // let tools = result.toolCalls
     let providerMetadata = result.providerMetadata
     let additionalTokenValues = {
       ...(providerMetadata?.openai?.reasoningTokens
         ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
         : {}),
-      ...(providerMetadata?.openai?.cachedPromptToken
+      ...(providerMetadata?.openai?.cachedPromptTokens
         ? { cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens }
         : {}),
       ...(providerMetadata?.anthropic
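
This hunk also fixes the key typo behind the PR: the guard previously checked providerMetadata?.openai?.cachedPromptToken (singular), which never matches the cachedPromptTokens field actually reported, so cacheReadInputTokens was silently dropped. A small sketch with an assumed metadata payload (field names follow the keys read in the diff above):

// Assumed providerMetadata for an OpenAI call that hit the prompt cache.
const providerMetadata = {
  openai: { reasoningTokens: 0, cachedPromptTokens: 1024 },
}

const additionalTokenValues = {
  ...(providerMetadata?.openai?.reasoningTokens
    ? { reasoningTokens: providerMetadata.openai.reasoningTokens }
    : {}),
  ...(providerMetadata?.openai?.cachedPromptTokens
    ? { cacheReadInputTokens: providerMetadata.openai.cachedPromptTokens }
    : {}),
}
// reasoningTokens is 0 (falsy), so only the cache read survives:
// additionalTokenValues => { cacheReadInputTokens: 1024 }
// With the old `cachedPromptToken` guard, this object stayed empty.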
@@ -233,8 +251,8 @@ export const createInstrumentationMiddleware = (
           if (chunk.providerMetadata?.openai?.reasoningTokens) {
             usage.reasoningTokens = chunk.providerMetadata.openai.reasoningTokens
           }
-          if (chunk.providerMetadata?.openai?.cachedPromptToken) {
-            usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptToken
+          if (chunk.providerMetadata?.openai?.cachedPromptTokens) {
+            usage.cacheReadInputTokens = chunk.providerMetadata.openai.cachedPromptTokens
           }
           if (chunk.providerMetadata?.anthropic?.cacheReadInputTokens) {
             usage.cacheReadInputTokens = chunk.providerMetadata.anthropic.cacheReadInputTokens
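
For orientation, a hedged sketch of how this middleware plugs into the Vercel AI SDK. wrapLanguageModel({ model, middleware }) is the SDK's standard middleware hook (experimental_wrapLanguageModel in older ai releases); the createInstrumentationMiddleware argument order and its import path are assumptions based on what is visible in this diff:

import { openai } from '@ai-sdk/openai'
import { wrapLanguageModel } from 'ai'
import { PostHog } from 'posthog-node'
// Assumed export path; the function lives in posthog-ai/src/vercel/middleware.ts.
import { createInstrumentationMiddleware } from '@posthog/ai'

const phClient = new PostHog('<ph_project_api_key>')
const model = openai('gpt-4o-mini')

// Wrap the model so generate/stream calls are captured as PostHog events,
// including the cached-token usage fixed in this commit.
const tracedModel = wrapLanguageModel({
  model,
  middleware: createInstrumentationMiddleware(phClient, model, {
    // Option name taken from the diff; anything else would be an assumption.
    posthogProviderOverride: 'openai',
  }),
})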
