Skip to content

Commit fa247bc

Browse files
replicas-connector[bot], claude, and H2Shami
authored
feat(worker): allow running prompts without specifying model (#5539)
* feat(worker): allow running prompts without specifying model This change allows users to run a prompt (prompt version) without providing the model in the request. Since the model is stored with the prompt version, it will be automatically fetched when only prompt_id is provided. Changes: - Add getModelFromPrompt method to PromptStore to fetch model from prompt version based on prompt_id, version_id, or environment - Add getModelFromPrompt method to PromptManager with caching (5 min TTL) to reduce database queries for sequential requests - Modify parseAndPrepareRequest in SimpleAIGateway to fetch model from prompt when request has prompt fields but no model specified - Add tests for the new functionality This enables users to make requests like: ```json { "prompt_id": "my-prompt", "inputs": { "name": "John" } } ``` Without having to specify the model separately, as it will be pulled from the stored prompt version. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> * add testing for prompts --------- Co-authored-by: replicas-connector[bot] <replicas-connector[bot]@users.noreply.github.com> Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com> Co-authored-by: H2Shami <H2Shami@gmail.com>
1 parent cee99df commit fa247bc

File tree

5 files changed

+2072
-0
lines changed

5 files changed

+2072
-0
lines changed

worker/src/lib/ai-gateway/SimpleAIGateway.ts

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -365,6 +365,15 @@ export class SimpleAIGateway {
365365
const rawBody = await this.requestWrapper.unsafeGetBodyText();
366366
const parsedBody: any = tryJSONParse(rawBody ?? "{}");
367367

368+
// If prompt_id is provided but model is not, fetch the model from the prompt
369+
if (parsedBody && !parsedBody.model && this.hasPromptFields(parsedBody)) {
370+
const modelResult = await this.getModelFromPromptFields(parsedBody);
371+
if (isErr(modelResult)) {
372+
return err(modelResult.error);
373+
}
374+
parsedBody.model = modelResult.data;
375+
}
376+
368377
if (!parsedBody || !parsedBody.model) {
369378
return err(
370379
new Response(
@@ -454,6 +463,59 @@ export class SimpleAIGateway {
454463
);
455464
}
456465

466+
/**
467+
* Fetches the model from a prompt when the request has prompt fields but no model specified.
468+
* This allows users to run prompts without explicitly providing the model in the request,
469+
* as the model will be pulled from the stored prompt version.
470+
*/
471+
private async getModelFromPromptFields(
472+
parsedBody: any
473+
): Promise<Result<string, Response>> {
474+
// Only prompt_id is required to fetch the model
475+
if (!parsedBody.prompt_id) {
476+
return err(
477+
new Response(
478+
JSON.stringify({
479+
error:
480+
"prompt_id is required to fetch model from prompt. Either provide a model or a prompt_id.",
481+
}),
482+
{ status: 400, headers: { "Content-Type": "application/json" } }
483+
)
484+
);
485+
}
486+
487+
const promptManager = new PromptManager(
488+
new HeliconePromptManager({
489+
apiKey: this.apiKey,
490+
baseUrl: this.env.VALHALLA_URL,
491+
}),
492+
new PromptStore(this.supabaseClient),
493+
this.env
494+
);
495+
496+
const modelResult = await promptManager.getModelFromPrompt(
497+
{
498+
prompt_id: parsedBody.prompt_id,
499+
version_id: parsedBody.version_id,
500+
environment: parsedBody.environment,
501+
},
502+
this.orgId
503+
);
504+
505+
if (isErr(modelResult)) {
506+
return err(
507+
new Response(
508+
JSON.stringify({
509+
error: `Failed to fetch model from prompt: ${modelResult.error}`,
510+
}),
511+
{ status: 400, headers: { "Content-Type": "application/json" } }
512+
)
513+
);
514+
}
515+
516+
return ok(modelResult.data);
517+
}
518+
457519
// reasoning_options reserved for providers with custom reasoning logic
458520
private requiresReasoningOptions(providerModelId: string): boolean {
459521
return (

worker/src/lib/db/PromptStore.ts

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,4 +102,122 @@ export class PromptStore {
102102

103103
return data.production_version;
104104
}
105+
106+
/**
107+
* Gets the model from a prompt version based on the prompt params.
108+
* Uses the same resolution logic as getPromptVersionId:
109+
* 1. If environment is specified, use the version for that environment
110+
* 2. If version_id is specified, use that specific version
111+
* 3. Otherwise, use the production version
112+
*/
113+
async getModelFromPrompt(
114+
params: HeliconePromptParams,
115+
orgId: string
116+
): Promise<Result<string, string>> {
117+
const { prompt_id, version_id, environment } = params;
118+
119+
if (!prompt_id) {
120+
return err("No prompt ID provided");
121+
}
122+
123+
// If environment is specified, get model from environment version
124+
if (environment) {
125+
const model = await this.getModelFromEnvironmentVersion(
126+
prompt_id,
127+
environment,
128+
orgId
129+
);
130+
if (model) {
131+
return ok(model);
132+
}
133+
}
134+
135+
// If specific version_id is provided, get model from that version
136+
if (version_id) {
137+
const model = await this.getModelFromVersionById(version_id, orgId);
138+
if (model) {
139+
return ok(model);
140+
}
141+
}
142+
143+
// Fall back to production version
144+
const model = await this.getModelFromProductionVersion(prompt_id, orgId);
145+
if (!model) {
146+
return err("Invalid prompt ID - no valid version found");
147+
}
148+
return ok(model);
149+
}
150+
151+
/**
 * Looks up the model on the prompt version tagged with `environment`.
 * Returns null on any miss or query error so the caller can fall back
 * to other resolution strategies.
 * NOTE(review): `.single()` errors when multiple versions carry the same
 * environment tag, which also resolves to null here — confirm environment
 * tags are unique per prompt.
 */
private async getModelFromEnvironmentVersion(
  promptId: string,
  environment: string,
  orgId: string
): Promise<string | null> {
  const result = await this.supabaseClient
    .from("prompts_2025_versions")
    .select("model")
    .eq("prompt_id", promptId)
    .contains("environments", [environment])
    .eq("organization", orgId)
    .eq("soft_delete", false)
    .single();

  return result.error || !result.data ? null : result.data.model;
}
171+
172+
/**
 * Looks up the model on a specific prompt version by its id, scoped to
 * the organization and excluding soft-deleted rows.
 * Returns null on any miss or query error.
 */
private async getModelFromVersionById(
  versionId: string,
  orgId: string
): Promise<string | null> {
  const result = await this.supabaseClient
    .from("prompts_2025_versions")
    .select("model")
    .eq("id", versionId)
    .eq("organization", orgId)
    .eq("soft_delete", false)
    .single();

  return result.error || !result.data ? null : result.data.model;
}
190+
191+
/**
 * Resolves the model from a prompt's production version.
 * Returns null when the prompt does not exist, has no production version
 * set, or the version row cannot be read.
 */
private async getModelFromProductionVersion(
  promptId: string,
  orgId: string
): Promise<string | null> {
  // First, find which version is currently marked as production.
  const { data: promptData, error: promptError } = await this.supabaseClient
    .from("prompts_2025")
    .select("production_version")
    .eq("id", promptId)
    .eq("organization", orgId)
    .eq("soft_delete", false)
    .single();

  if (promptError || !promptData || !promptData.production_version) {
    return null;
  }

  // Reuse the shared by-id lookup instead of duplicating the
  // prompts_2025_versions query (identical filters and null-on-miss
  // behavior), so the two code paths can't drift apart.
  return this.getModelFromVersionById(promptData.production_version, orgId);
}
105223
}

worker/src/lib/managers/PromptManager.ts

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,23 @@ export class PromptManager {
3636
return `prompt_version_${params.prompt_id}_${scope}_${orgId}`;
3737
}
3838

39+
/**
 * Builds the cache key under which a prompt's resolved model is stored.
 * The scope segment encodes the resolution target (environment, then
 * version, then production) so distinct lookups never share an entry.
 * Returns null when there is no prompt_id to key on.
 */
private buildPromptModelCacheKey(
  params: HeliconePromptParams,
  orgId: string
): string | null {
  const { prompt_id, environment, version_id } = params;
  if (!prompt_id) {
    return null;
  }

  let scope: string;
  if (environment) {
    scope = `env:${environment}`;
  } else if (version_id) {
    scope = `version:${version_id}`;
  } else {
    scope = "prod";
  }

  return `prompt_model_${prompt_id}_${scope}_${orgId}`;
}
55+
3956
private buildPromptBodyCacheKey(
4057
promptId: string,
4158
versionId: string,
@@ -64,6 +81,31 @@ export class PromptManager {
6481
);
6582
}
6683

84+
/**
85+
* Gets the model associated with a prompt version, with caching.
86+
* This allows requests to omit the model field when using a prompt_id,
87+
* as the model will be fetched from the stored prompt version.
88+
*/
89+
async getModelFromPrompt(
90+
params: HeliconePromptParams,
91+
orgId: string
92+
): Promise<Result<string, string>> {
93+
const cacheKey = this.buildPromptModelCacheKey(params, orgId);
94+
if (!cacheKey) {
95+
return this.promptStore.getModelFromPrompt(params, orgId);
96+
}
97+
98+
return await getAndStoreInCache(
99+
cacheKey,
100+
this.env,
101+
async () => {
102+
return this.promptStore.getModelFromPrompt(params, orgId);
103+
},
104+
300,
105+
false
106+
);
107+
}
108+
67109
async getSourcePromptBodyWithFetch(
68110
params: HeliconePromptParams,
69111
orgId: string

0 commit comments

Comments
 (0)