Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 58 additions & 0 deletions apps/backend/app/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,11 @@ def improvements(self) -> Table:
"""Improvement results table."""
Copy link
Copy Markdown
Contributor

@cubic-dev-ai cubic-dev-ai bot Feb 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2: reset_database doesn’t clear the newly added prompt_templates table, so database resets leave custom prompts behind and can contaminate test/environment state.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At apps/backend/app/database.py, line 51:

<comment>`reset_database` doesn’t clear the newly added `prompt_templates` table, so database resets leave custom prompts behind and can contaminate test/environment state.</comment>

<file context>
@@ -47,6 +47,11 @@ def improvements(self) -> Table:
         return self.db.table("improvements")
 
+    @property
+    def prompt_templates(self) -> Table:
+        """Custom prompt templates table."""
+        return self.db.table("prompt_templates")
</file context>
Fix with Cubic

return self.db.table("improvements")

@property
def prompt_templates(self) -> Table:
    """Custom prompt templates table.

    Documents store: prompt_id, label, description, prompt,
    created_at, updated_at (ISO-8601 UTC strings).
    """
    # TinyDB creates the table lazily on first access.
    return self.db.table("prompt_templates")

def close(self) -> None:
"""Close database connection."""
if self._db is not None:
Expand Down Expand Up @@ -172,6 +177,59 @@ def list_resumes(self) -> list[dict[str, Any]]:
"""List all resumes."""
return list(self.resumes.all())

# Prompt template operations
def list_prompt_templates(self) -> list[dict[str, Any]]:
"""List all custom prompt templates."""
return list(self.prompt_templates.all())

def get_prompt_template(self, prompt_id: str) -> dict[str, Any] | None:
    """Fetch a custom prompt template by its ID, or None when absent."""
    query = Query()
    matches = self.prompt_templates.search(query.prompt_id == prompt_id)
    if not matches:
        return None
    return matches[0]

def create_prompt_template(
    self, label: str, description: str, prompt: str
) -> dict[str, Any]:
    """Persist a new custom prompt template and return the stored document.

    A fresh UUID is assigned as prompt_id; created_at and updated_at are
    both set to the current UTC time in ISO-8601 format.
    """
    timestamp = datetime.now(timezone.utc).isoformat()
    record: dict[str, Any] = {
        "prompt_id": str(uuid4()),
        "label": label,
        "description": description,
        "prompt": prompt,
        "created_at": timestamp,
        "updated_at": timestamp,
    }
    self.prompt_templates.insert(record)
    return record

def update_prompt_template(
    self, prompt_id: str, updates: dict[str, Any]
) -> dict[str, Any]:
    """Update a custom prompt template by ID and return the fresh document.

    Args:
        prompt_id: ID of the template to update.
        updates: Partial field updates to apply; `updated_at` is set
            automatically and need not (should not) be supplied.

    Raises:
        ValueError: If no template with `prompt_id` exists, or if the
            document cannot be re-read after a successful update.
    """
    # Copy before injecting the timestamp so the caller's dict is never
    # mutated as a side effect of this call.
    payload = dict(updates)
    payload["updated_at"] = datetime.now(timezone.utc).isoformat()

    Prompt = Query()
    updated_count = self.prompt_templates.update(payload, Prompt.prompt_id == prompt_id)
    if not updated_count:
        raise ValueError(f"Prompt template not found: {prompt_id}")

    result = self.get_prompt_template(prompt_id)
    if not result:
        raise ValueError(f"Prompt template disappeared after update: {prompt_id}")

    return result

def delete_prompt_template(self, prompt_id: str) -> bool:
    """Delete a custom prompt template by ID; True if anything was removed."""
    query = Query()
    removed_doc_ids = self.prompt_templates.remove(query.prompt_id == prompt_id)
    return bool(removed_doc_ids)

def set_master_resume(self, resume_id: str) -> bool:
"""Set a resume as the master, unsetting any existing master.

Expand Down
9 changes: 5 additions & 4 deletions apps/backend/app/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,13 +293,14 @@ def _supports_temperature(provider: str, model: str) -> bool:
def _get_reasoning_effort(provider: str, model: str) -> str | None:
"""Return a default reasoning_effort for models that require it.

Some OpenAI gpt-5 models may return empty message.content unless a supported
`reasoning_effort` is explicitly set. This keeps downstream JSON parsing reliable.
Some OpenAI reasoning models (o1, o3, gpt-5) require a supported
`reasoning_effort` (low, medium, high) to work correctly.
"""
_ = provider
model_lower = model.lower()
if "gpt-5" in model_lower:
return "minimal"
# Handle GPT-5 (future-proofing) and o-series (o1, o3)
if any(m in model_lower for m in ["gpt-5", "o1", "o3"]):
return "medium"
return None


Expand Down
1 change: 1 addition & 0 deletions apps/backend/app/prompts/templates.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ def get_language_name(code: str) -> str:
"company": "Tech Corp",
"location": "San Francisco, CA",
"years": "2020 - Present",
"jobDescription": "Led platform modernization initiatives across customer-facing systems.",
"description": [
"Led development of microservices architecture",
"Improved system performance by 40%"
Expand Down
180 changes: 178 additions & 2 deletions apps/backend/app/routers/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import json
import logging
import string
from pathlib import Path

from fastapi import APIRouter, BackgroundTasks, HTTPException
Expand All @@ -18,13 +19,23 @@
PromptConfigRequest,
PromptConfigResponse,
PromptOption,
PromptTemplate,
PromptTemplateCreateRequest,
PromptTemplateDeleteResponse,
PromptTemplateListResponse,
PromptTemplateResponse,
PromptTemplateUpdateRequest,
ApiKeyProviderStatus,
ApiKeyStatusResponse,
ApiKeysUpdateRequest,
ApiKeysUpdateResponse,
ResetDatabaseRequest,
)
from app.prompts import DEFAULT_IMPROVE_PROMPT_ID, IMPROVE_PROMPT_OPTIONS
from app.prompts import (
DEFAULT_IMPROVE_PROMPT_ID,
IMPROVE_PROMPT_OPTIONS,
IMPROVE_RESUME_PROMPTS,
)
from app.config import (
get_api_keys_from_config,
save_api_keys_to_config,
Expand Down Expand Up @@ -67,7 +78,61 @@ def _mask_api_key(key: str) -> str:

def _get_prompt_options() -> list[PromptOption]:
    """Return available prompt options for resume tailoring.

    Built-in options come first, followed by user-defined templates
    loaded from the database. Missing label/description fields on
    custom templates default to empty strings.
    """
    options = [PromptOption(**option) for option in IMPROVE_PROMPT_OPTIONS]
    for template in db.list_prompt_templates():
        options.append(
            PromptOption(
                id=template["prompt_id"],
                label=template.get("label", ""),
                description=template.get("description", ""),
            )
        )
    return options


def _get_builtin_prompt_templates() -> list[PromptTemplate]:
    """Build PromptTemplate models for every built-in improve prompt."""
    return [
        PromptTemplate(
            id=option["id"],
            label=option["label"],
            description=option["description"],
            prompt=IMPROVE_RESUME_PROMPTS[option["id"]],
            is_builtin=True,
            created_at=None,
            updated_at=None,
        )
        for option in IMPROVE_PROMPT_OPTIONS
    ]


# Placeholder names a custom prompt template may reference; any other
# `{...}` field is rejected by _validate_prompt_template with HTTP 400.
_ALLOWED_PROMPT_FIELDS = {
    "job_description",
    "job_keywords",
    "original_resume",
    "schema",
    "output_language",
    "critical_truthfulness_rules",
}


def _validate_prompt_template(prompt: str) -> None:
    """Validate that a custom prompt only uses supported placeholders.

    Args:
        prompt: The user-supplied prompt template text.

    Raises:
        HTTPException: 400 when the prompt is not a valid format string
            (e.g. unescaped '{' or '}') or when it references placeholders
            outside _ALLOWED_PROMPT_FIELDS.
    """
    try:
        fields = {
            field_name
            for _, field_name, _, _ in string.Formatter().parse(prompt)
            if field_name
        }
    except ValueError as exc:
        # Formatter().parse raises ValueError on malformed format strings;
        # surface that as a client error instead of an unhandled 500.
        raise HTTPException(
            status_code=400,
            detail="Invalid prompt format string (check for unescaped '{' or '}').",
        ) from exc

    unknown = {field for field in fields if field not in _ALLOWED_PROMPT_FIELDS}
    if unknown:
        raise HTTPException(
            status_code=400,
            detail=(
                "Unsupported prompt placeholders: "
                f"{sorted(unknown)}. Allowed: {sorted(_ALLOWED_PROMPT_FIELDS)}"
            ),
        )


async def _log_llm_health_check(config: LLMConfig) -> None:
Expand Down Expand Up @@ -320,6 +385,117 @@ async def update_prompt_config(
)


@router.get("/prompt-templates", response_model=PromptTemplateListResponse)
async def list_prompt_templates() -> PromptTemplateListResponse:
    """List built-in and custom prompt templates (built-ins first)."""
    templates = _get_builtin_prompt_templates()
    for doc in db.list_prompt_templates():
        templates.append(
            PromptTemplate(
                id=doc["prompt_id"],
                label=doc.get("label", ""),
                description=doc.get("description", ""),
                prompt=doc.get("prompt", ""),
                is_builtin=False,
                created_at=doc.get("created_at"),
                updated_at=doc.get("updated_at"),
            )
        )
    return PromptTemplateListResponse(data=templates)


@router.post("/prompt-templates", response_model=PromptTemplateResponse)
async def create_prompt_template(
    request: PromptTemplateCreateRequest,
) -> PromptTemplateResponse:
    """Create a custom prompt template.

    All three fields are required (whitespace-only values are rejected)
    and the prompt text must pass placeholder validation.
    """
    label = request.label.strip()
    description = request.description.strip()
    prompt = request.prompt.strip()

    if not (label and description and prompt):
        raise HTTPException(
            status_code=400,
            detail="Label, description, and prompt are required.",
        )

    _validate_prompt_template(prompt)

    created = db.create_prompt_template(label=label, description=description, prompt=prompt)
    template = PromptTemplate(
        id=created["prompt_id"],
        label=created["label"],
        description=created["description"],
        prompt=created["prompt"],
        is_builtin=False,
        created_at=created.get("created_at"),
        updated_at=created.get("updated_at"),
    )
    return PromptTemplateResponse(data=template)


@router.put("/prompt-templates/{prompt_id}", response_model=PromptTemplateResponse)
async def update_prompt_template(
    prompt_id: str,
    request: PromptTemplateUpdateRequest,
) -> PromptTemplateResponse:
    """Update a custom prompt template.

    Built-in prompts are read-only. Only fields present on the request
    are changed; a supplied prompt must be non-empty and pass placeholder
    validation, matching the invariant enforced at creation.

    Raises:
        HTTPException: 400 for built-in IDs, empty prompt text, or an
            empty update set; 404 when the template does not exist.
    """
    builtin_ids = {option["id"] for option in IMPROVE_PROMPT_OPTIONS}
    if prompt_id in builtin_ids:
        raise HTTPException(status_code=400, detail="Built-in prompts cannot be edited.")

    updates: dict[str, str] = {}
    if request.label is not None:
        updates["label"] = request.label.strip()
    if request.description is not None:
        updates["description"] = request.description.strip()
    if request.prompt is not None:
        prompt = request.prompt.strip()
        # Reject empty prompt text so updates cannot break the non-empty
        # invariant enforced by create_prompt_template.
        if not prompt:
            raise HTTPException(status_code=400, detail="Prompt cannot be empty.")
        _validate_prompt_template(prompt)
        updates["prompt"] = prompt

    if not updates:
        raise HTTPException(status_code=400, detail="No updates provided.")

    try:
        updated = db.update_prompt_template(prompt_id, updates)
    except ValueError as exc:
        raise HTTPException(status_code=404, detail=str(exc)) from exc

    return PromptTemplateResponse(
        data=PromptTemplate(
            id=updated["prompt_id"],
            label=updated.get("label", ""),
            description=updated.get("description", ""),
            prompt=updated.get("prompt", ""),
            is_builtin=False,
            created_at=updated.get("created_at"),
            updated_at=updated.get("updated_at"),
        )
    )


@router.delete("/prompt-templates/{prompt_id}", response_model=PromptTemplateDeleteResponse)
async def delete_prompt_template(prompt_id: str) -> PromptTemplateDeleteResponse:
    """Delete a custom prompt template.

    Built-in prompts cannot be deleted. If the deleted template was the
    configured default, the default falls back to the built-in prompt.
    """
    builtin_ids = {option["id"] for option in IMPROVE_PROMPT_OPTIONS}
    if prompt_id in builtin_ids:
        raise HTTPException(status_code=400, detail="Built-in prompts cannot be deleted.")

    if not db.delete_prompt_template(prompt_id):
        raise HTTPException(status_code=404, detail="Prompt template not found.")

    stored = _load_config()
    if stored.get("default_prompt_id") == prompt_id:
        stored["default_prompt_id"] = DEFAULT_IMPROVE_PROMPT_ID
        _save_config(stored)

    return PromptTemplateDeleteResponse(message="Prompt template deleted.")


# Supported API key providers
SUPPORTED_PROVIDERS = ["openai", "anthropic", "google", "openrouter", "deepseek"]

Expand Down
3 changes: 3 additions & 0 deletions apps/backend/app/routers/resumes.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,9 @@ def _get_default_prompt_id() -> str:
"""Get configured default prompt id from config file."""
config = _load_config()
option_ids = {option["id"] for option in IMPROVE_PROMPT_OPTIONS}
option_ids.update(
prompt.get("prompt_id") for prompt in db.list_prompt_templates() if prompt.get("prompt_id")
Copy link
Copy Markdown
Contributor

@cubic-dev-ai cubic-dev-ai bot Feb 9, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2: Synchronous TinyDB I/O was added inside _get_default_prompt_id(), which is called from async route handlers. This blocking DB call will freeze the event loop while reading prompt templates, reducing concurrency under load.

Prompt for AI agents
Check if this issue is valid — if so, understand the root cause and fix it. At apps/backend/app/routers/resumes.py, line 86:

<comment>Synchronous TinyDB I/O was added inside _get_default_prompt_id(), which is called from async route handlers. This blocking DB call will freeze the event loop while reading prompt templates, reducing concurrency under load.</comment>

<file context>
@@ -82,6 +82,9 @@ def _get_default_prompt_id() -> str:
     config = _load_config()
     option_ids = {option["id"] for option in IMPROVE_PROMPT_OPTIONS}
+    option_ids.update(
+        prompt.get("prompt_id") for prompt in db.list_prompt_templates() if prompt.get("prompt_id")
+    )
     prompt_id = config.get("default_prompt_id", DEFAULT_IMPROVE_PROMPT_ID)
</file context>
Fix with Cubic

)
prompt_id = config.get("default_prompt_id", DEFAULT_IMPROVE_PROMPT_ID)
return prompt_id if prompt_id in option_ids else DEFAULT_IMPROVE_PROMPT_ID

Expand Down
12 changes: 12 additions & 0 deletions apps/backend/app/schemas/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,12 @@
PromptConfigRequest,
PromptConfigResponse,
PromptOption,
PromptTemplate,
PromptTemplateCreateRequest,
PromptTemplateDeleteResponse,
PromptTemplateListResponse,
PromptTemplateResponse,
PromptTemplateUpdateRequest,
RawResume,
RefinementStats,
ResumeDiffSummary,
Expand Down Expand Up @@ -85,6 +91,12 @@
"PromptOption",
"PromptConfigRequest",
"PromptConfigResponse",
"PromptTemplate",
"PromptTemplateCreateRequest",
"PromptTemplateUpdateRequest",
"PromptTemplateResponse",
"PromptTemplateListResponse",
"PromptTemplateDeleteResponse",
"FeatureConfigRequest",
"FeatureConfigResponse",
"ApiKeyProviderStatus",
Expand Down
Loading