84 changes: 84 additions & 0 deletions litellm/llms/litellm_proxy/skills/handler.py
@@ -71,6 +71,17 @@ async def create_skill(
"""
prisma_client = await LiteLLMSkillsHandler._get_prisma_client()

# Enforce unique display_title
if data.display_title:
existing = await prisma_client.db.litellm_skillstable.find_first(
where={"display_title": data.display_title}
)
if existing is not None:
raise ValueError(
f"A skill with display_title '{data.display_title}' already exists "
f"(id: {existing.skill_id}). Skill names must be unique."
)

skill_id = f"litellm_skill_{uuid.uuid4()}"

skill_data: Dict[str, Any] = {
@@ -217,3 +228,76 @@ async def fetch_skill_from_db(skill_id: str) -> Optional[LiteLLM_SkillsTable]:
f"LiteLLMSkillsHandler: Error fetching skill {skill_id}: {e}"
)
return None

@staticmethod
async def save_provider_skill_id(
skill_id: str,
provider: str,
provider_skill_id: str,
) -> None:
"""
Save a provider-assigned skill ID for a LiteLLM skill.

When a skill is used with a provider that has a native skills API
(e.g. Anthropic), the provider returns its own skill ID. We store
that mapping so subsequent calls can reuse it instead of re-creating the skill.

Stored in metadata._provider_skill_ids.{provider} = provider_skill_id
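
        Example (the provider-side ID shown is illustrative):
            metadata = {"_provider_skill_ids": {"anthropic": "skill_abc123"}}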

Args:
skill_id: The LiteLLM skill ID
provider: Provider name (e.g. "anthropic")
provider_skill_id: The ID assigned by the provider
"""
import json

prisma_client = await LiteLLMSkillsHandler._get_prisma_client()

# Fetch current metadata
skill = await prisma_client.db.litellm_skillstable.find_unique(
where={"skill_id": skill_id}
)
if skill is None:
verbose_logger.warning(
f"LiteLLMSkillsHandler: Cannot save provider ID - skill {skill_id} not found"
)
return

        # Tolerate both dict and JSON-string metadata. (The previous ternary
        # coerced non-dicts to {} first, so the str branch could never run.)
        metadata = skill.metadata
        if isinstance(metadata, str):
            metadata = json.loads(metadata)
        if not isinstance(metadata, dict):
            metadata = {}

provider_ids = metadata.get("_provider_skill_ids", {})
provider_ids[provider] = provider_skill_id
metadata["_provider_skill_ids"] = provider_ids

await prisma_client.db.litellm_skillstable.update(
where={"skill_id": skill_id},
data={"metadata": json.dumps(metadata)},
)

verbose_logger.debug(
f"LiteLLMSkillsHandler: Saved {provider} skill ID "
f"'{provider_skill_id}' for skill {skill_id}"
)

@staticmethod
def get_provider_skill_id(
skill: LiteLLM_SkillsTable,
provider: str,
) -> Optional[str]:
"""
Get the provider-assigned skill ID from a skill's metadata.

Args:
skill: The LiteLLM skill record
provider: Provider name (e.g. "anthropic")

Returns:
The provider's skill ID, or None if not yet registered
"""
        import json

        metadata = skill.metadata
        if isinstance(metadata, str):
            # metadata is persisted via json.dumps, so tolerate a JSON string
            metadata = json.loads(metadata)
        if not isinstance(metadata, dict):
            return None
        provider_ids = metadata.get("_provider_skill_ids", {})
        return provider_ids.get(provider)
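
A minimal usage sketch pairing the two helpers above (the skill_id value and the create_skill_on_provider helper are hypothetical, not part of this diff):

    skill = await LiteLLMSkillsHandler.fetch_skill_from_db(skill_id="litellm_skill_123")
    if skill is not None:
        provider_skill_id = LiteLLMSkillsHandler.get_provider_skill_id(
            skill=skill, provider="anthropic"
        )
        if provider_skill_id is None:
            # First use with this provider: register the skill, then cache the ID
            provider_skill_id = await create_skill_on_provider(skill)  # hypothetical
            await LiteLLMSkillsHandler.save_provider_skill_id(
                skill_id=skill.skill_id,
                provider="anthropic",
                provider_skill_id=provider_skill_id,
            )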
194 changes: 194 additions & 0 deletions litellm/llms/litellm_proxy/skills/skill_applicator.py
@@ -0,0 +1,194 @@
"""
Skill Applicator for Gateway Skills.

Handles provider-specific strategies for applying skills to LLM requests.
Uses get_llm_provider() to resolve models to providers, then checks the
centralized beta headers config to determine if the model's provider
supports native skills (skills-2025-10-02 beta). If not, falls back to
system prompt injection.
"""

from typing import List, Optional

from litellm._logging import verbose_logger
from litellm.constants import ANTHROPIC_SKILLS_API_BETA_VERSION
from litellm.proxy._types import LiteLLM_SkillsTable


class SkillApplicator:
"""
Applies gateway skills to LLM requests using provider-specific strategies.

Provider resolution is delegated to litellm.get_llm_provider().
Native skills support is determined by the centralized beta headers
config (anthropic_beta_headers_config.json) — if the provider maps
skills-2025-10-02 to a non-null value, native skills are supported.
"""

def __init__(self):
from litellm.llms.litellm_proxy.skills.prompt_injection import (
SkillPromptInjectionHandler,
)

self.prompt_handler = SkillPromptInjectionHandler()

def supports_native_skills(self, provider: str) -> bool:
"""
Check if a provider supports native skills by consulting the
centralized beta headers config.
"""
from litellm.anthropic_beta_headers_manager import is_beta_header_supported

return is_beta_header_supported(
beta_header=ANTHROPIC_SKILLS_API_BETA_VERSION,
provider=provider,
)

async def apply_skills(
self,
data: dict,
skills: List[LiteLLM_SkillsTable],
provider: str,
) -> dict:
"""
Apply skills to a request based on provider.

Args:
data: The request data dict
skills: List of skills to apply
provider: The LLM provider name (from get_llm_provider)

Returns:
Modified request data with skills applied
"""
if not skills:
return data

if self.supports_native_skills(provider):
verbose_logger.debug(
f"SkillApplicator: Applying {len(skills)} skills via native API "
f"for provider={provider}"
)
return self._apply_tool_conversion_strategy(data, skills)

verbose_logger.debug(
f"SkillApplicator: Applying {len(skills)} skills via system prompt "
f"for provider={provider}"
)
return self._apply_system_prompt_strategy(data, skills)

def _apply_system_prompt_strategy(
self,
data: dict,
skills: List[LiteLLM_SkillsTable],
) -> dict:
"""
Apply skills by injecting content into system prompt.

Format:
---
## Skill: {display_title}
**Description:** {description}

### Instructions
{SKILL.md body content}
---
"""
skill_contents: List[str] = []

for skill in skills:
content = self._format_skill_content(skill)
if content:
skill_contents.append(content)

if not skill_contents:
return data

return self.prompt_handler.inject_skill_content_to_messages(
data, skill_contents, use_anthropic_format=False
)

def _apply_tool_conversion_strategy(
self,
data: dict,
skills: List[LiteLLM_SkillsTable],
) -> dict:
"""
Apply skills by converting to Anthropic-style tools + system prompt.
"""
tools = data.get("tools", [])
skill_contents: List[str] = []

for skill in skills:
tools.append(self.prompt_handler.convert_skill_to_anthropic_tool(skill))

content = self.prompt_handler.extract_skill_content(skill)
if content:
skill_contents.append(content)

if tools:
data["tools"] = tools

if skill_contents:
data = self.prompt_handler.inject_skill_content_to_messages(
data, skill_contents, use_anthropic_format=True
)

return data

def _format_skill_content(self, skill: LiteLLM_SkillsTable) -> Optional[str]:
"""
Format skill content for system prompt injection.
"""
content = self.prompt_handler.extract_skill_content(skill)

if not content:
content = skill.instructions

if not content:
return None

title = skill.display_title or skill.skill_id
parts = [f"## Skill: {title}"]

if skill.description:
parts.append(f"**Description:** {skill.description}")

parts.append("")
parts.append("### Instructions")
parts.append(content)

return "\n".join(parts)


def get_provider_from_model(model: str) -> str:
"""
Determine the provider from a model string.

First checks the proxy router's model list to resolve aliases
(e.g., "claude-sonnet" -> "anthropic/claude-sonnet-4-20250514"),
then uses get_llm_provider on the resolved model.
"""
resolved_model = model

# Try to resolve through the router's model list
try:
from litellm.proxy.proxy_server import llm_router

if llm_router is not None:
deployments = llm_router.get_model_list(model_name=model)
if deployments:
resolved_model = deployments[0]["litellm_params"]["model"]
except Exception:
pass

try:
from litellm.litellm_core_utils.get_llm_provider_logic import get_llm_provider

_, custom_llm_provider, _, _ = get_llm_provider(model=resolved_model)
return custom_llm_provider or "openai"
except Exception as e:
verbose_logger.warning(
f"SkillApplicator: Failed to determine provider for model {model}: {e}"
)
return "openai"