Skip to content

Commit 4d0e32c

Browse files
feat(worker): allow running prompts without specifying model
This change allows users to run a prompt (prompt version) without providing the model in the request. Since the model is stored with the prompt version, it will be automatically fetched when only prompt_id is provided.

Changes:
- Add getModelFromPrompt method to PromptStore to fetch the model from a prompt version based on prompt_id, version_id, or environment
- Add getModelFromPrompt method to PromptManager with caching (5 min TTL) to reduce database queries for sequential requests
- Modify parseAndPrepareRequest in SimpleAIGateway to fetch the model from the prompt when the request has prompt fields but no model specified
- Add tests for the new functionality

This enables users to make requests like:

```json
{ "prompt_id": "my-prompt", "inputs": { "name": "John" } }
```

without having to specify the model separately, as it will be pulled from the stored prompt version.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent 1ab78af commit 4d0e32c

File tree

4 files changed

+395
-0
lines changed

4 files changed

+395
-0
lines changed

worker/src/lib/ai-gateway/SimpleAIGateway.ts

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -364,6 +364,15 @@ export class SimpleAIGateway {
364364
const rawBody = await this.requestWrapper.unsafeGetBodyText();
365365
const parsedBody: any = tryJSONParse(rawBody ?? "{}");
366366

367+
// If prompt_id is provided but model is not, fetch the model from the prompt
368+
if (parsedBody && !parsedBody.model && this.hasPromptFields(parsedBody)) {
369+
const modelResult = await this.getModelFromPromptFields(parsedBody);
370+
if (isErr(modelResult)) {
371+
return err(modelResult.error);
372+
}
373+
parsedBody.model = modelResult.data;
374+
}
375+
367376
if (!parsedBody || !parsedBody.model) {
368377
return err(
369378
new Response(
@@ -453,6 +462,59 @@ export class SimpleAIGateway {
453462
);
454463
}
455464

465+
/**
466+
* Fetches the model from a prompt when the request has prompt fields but no model specified.
467+
* This allows users to run prompts without explicitly providing the model in the request,
468+
* as the model will be pulled from the stored prompt version.
469+
*/
470+
private async getModelFromPromptFields(
471+
parsedBody: any
472+
): Promise<Result<string, Response>> {
473+
// Only prompt_id is required to fetch the model
474+
if (!parsedBody.prompt_id) {
475+
return err(
476+
new Response(
477+
JSON.stringify({
478+
error:
479+
"prompt_id is required to fetch model from prompt. Either provide a model or a prompt_id.",
480+
}),
481+
{ status: 400, headers: { "Content-Type": "application/json" } }
482+
)
483+
);
484+
}
485+
486+
const promptManager = new PromptManager(
487+
new HeliconePromptManager({
488+
apiKey: this.apiKey,
489+
baseUrl: this.env.VALHALLA_URL,
490+
}),
491+
new PromptStore(this.supabaseClient),
492+
this.env
493+
);
494+
495+
const modelResult = await promptManager.getModelFromPrompt(
496+
{
497+
prompt_id: parsedBody.prompt_id,
498+
version_id: parsedBody.version_id,
499+
environment: parsedBody.environment,
500+
},
501+
this.orgId
502+
);
503+
504+
if (isErr(modelResult)) {
505+
return err(
506+
new Response(
507+
JSON.stringify({
508+
error: `Failed to fetch model from prompt: ${modelResult.error}`,
509+
}),
510+
{ status: 400, headers: { "Content-Type": "application/json" } }
511+
)
512+
);
513+
}
514+
515+
return ok(modelResult.data);
516+
}
517+
456518
// reasoning_options reserved for providers with custom reasoning logic
457519
private requiresReasoningOptions(providerModelId: string): boolean {
458520
return (

worker/src/lib/db/PromptStore.ts

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,4 +102,122 @@ export class PromptStore {
102102

103103
return data.production_version;
104104
}
105+
106+
/**
107+
* Gets the model from a prompt version based on the prompt params.
108+
* Uses the same resolution logic as getPromptVersionId:
109+
* 1. If environment is specified, use the version for that environment
110+
* 2. If version_id is specified, use that specific version
111+
* 3. Otherwise, use the production version
112+
*/
113+
async getModelFromPrompt(
114+
params: HeliconePromptParams,
115+
orgId: string
116+
): Promise<Result<string, string>> {
117+
const { prompt_id, version_id, environment } = params;
118+
119+
if (!prompt_id) {
120+
return err("No prompt ID provided");
121+
}
122+
123+
// If environment is specified, get model from environment version
124+
if (environment) {
125+
const model = await this.getModelFromEnvironmentVersion(
126+
prompt_id,
127+
environment,
128+
orgId
129+
);
130+
if (model) {
131+
return ok(model);
132+
}
133+
}
134+
135+
// If specific version_id is provided, get model from that version
136+
if (version_id) {
137+
const model = await this.getModelFromVersionById(version_id, orgId);
138+
if (model) {
139+
return ok(model);
140+
}
141+
}
142+
143+
// Fall back to production version
144+
const model = await this.getModelFromProductionVersion(prompt_id, orgId);
145+
if (!model) {
146+
return err("Invalid prompt ID - no valid version found");
147+
}
148+
return ok(model);
149+
}
150+
151+
private async getModelFromEnvironmentVersion(
152+
promptId: string,
153+
environment: string,
154+
orgId: string
155+
): Promise<string | null> {
156+
const { data, error } = await this.supabaseClient
157+
.from("prompts_2025_versions")
158+
.select("model")
159+
.eq("prompt_id", promptId)
160+
.contains("environments", [environment])
161+
.eq("organization", orgId)
162+
.eq("soft_delete", false)
163+
.single();
164+
165+
if (error || !data) {
166+
return null;
167+
}
168+
169+
return data.model;
170+
}
171+
172+
private async getModelFromVersionById(
173+
versionId: string,
174+
orgId: string
175+
): Promise<string | null> {
176+
const { data, error } = await this.supabaseClient
177+
.from("prompts_2025_versions")
178+
.select("model")
179+
.eq("id", versionId)
180+
.eq("organization", orgId)
181+
.eq("soft_delete", false)
182+
.single();
183+
184+
if (error || !data) {
185+
return null;
186+
}
187+
188+
return data.model;
189+
}
190+
191+
private async getModelFromProductionVersion(
192+
promptId: string,
193+
orgId: string
194+
): Promise<string | null> {
195+
// First, get the production version ID from prompts_2025
196+
const { data: promptData, error: promptError } = await this.supabaseClient
197+
.from("prompts_2025")
198+
.select("production_version")
199+
.eq("id", promptId)
200+
.eq("organization", orgId)
201+
.eq("soft_delete", false)
202+
.single();
203+
204+
if (promptError || !promptData || !promptData.production_version) {
205+
return null;
206+
}
207+
208+
// Then get the model from the version
209+
const { data: versionData, error: versionError } = await this.supabaseClient
210+
.from("prompts_2025_versions")
211+
.select("model")
212+
.eq("id", promptData.production_version)
213+
.eq("organization", orgId)
214+
.eq("soft_delete", false)
215+
.single();
216+
217+
if (versionError || !versionData) {
218+
return null;
219+
}
220+
221+
return versionData.model;
222+
}
105223
}

worker/src/lib/managers/PromptManager.ts

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,23 @@ export class PromptManager {
3636
return `prompt_version_${params.prompt_id}_${scope}_${orgId}`;
3737
}
3838

39+
private buildPromptModelCacheKey(
40+
params: HeliconePromptParams,
41+
orgId: string
42+
): string | null {
43+
if (!params.prompt_id) {
44+
return null;
45+
}
46+
47+
const scope = params.environment
48+
? `env:${params.environment}`
49+
: params.version_id
50+
? `version:${params.version_id}`
51+
: "prod";
52+
53+
return `prompt_model_${params.prompt_id}_${scope}_${orgId}`;
54+
}
55+
3956
private buildPromptBodyCacheKey(
4057
promptId: string,
4158
versionId: string,
@@ -64,6 +81,31 @@ export class PromptManager {
6481
);
6582
}
6683

84+
/**
85+
* Gets the model associated with a prompt version, with caching.
86+
* This allows requests to omit the model field when using a prompt_id,
87+
* as the model will be fetched from the stored prompt version.
88+
*/
89+
async getModelFromPrompt(
90+
params: HeliconePromptParams,
91+
orgId: string
92+
): Promise<Result<string, string>> {
93+
const cacheKey = this.buildPromptModelCacheKey(params, orgId);
94+
if (!cacheKey) {
95+
return this.promptStore.getModelFromPrompt(params, orgId);
96+
}
97+
98+
return await getAndStoreInCache(
99+
cacheKey,
100+
this.env,
101+
async () => {
102+
return this.promptStore.getModelFromPrompt(params, orgId);
103+
},
104+
300,
105+
false
106+
);
107+
}
108+
67109
async getSourcePromptBodyWithFetch(
68110
params: HeliconePromptParams,
69111
orgId: string

0 commit comments

Comments (0)