-
-
Notifications
You must be signed in to change notification settings - Fork 7.1k
feat(guardrails): optional skip system message in unified guardrail inputs #25481
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,22 @@ | ||||||
| from __future__ import annotations | ||||||
|
|
||||||
| from typing import Any, Dict, List | ||||||
|
|
||||||
|
|
||||||
def effective_skip_system_message_for_guardrail(guardrail_to_apply: Any) -> bool:
    """Resolve whether system messages should be excluded from guardrail inputs.

    Precedence:
        1. The per-guardrail attribute ``skip_system_message_in_guardrail`` on
           ``guardrail_to_apply`` wins when it is set to anything other than
           ``None`` (missing attribute counts as unset).
        2. Otherwise the global ``litellm.skip_system_message_in_guardrail``
           flag is used, defaulting to ``False`` when absent.

    Args:
        guardrail_to_apply: Guardrail object; may or may not define the
            per-guardrail override attribute.

    Returns:
        ``True`` when system messages should be skipped, ``False`` otherwise.
    """
    per_guardrail_setting = getattr(
        guardrail_to_apply, "skip_system_message_in_guardrail", None
    )
    if per_guardrail_setting is not None:
        return bool(per_guardrail_setting)
    # NOTE(review): module-level imports are required by the style guide, with
    # circular imports as the only accepted exception. This import is kept
    # local because `litellm` imports this utils module — hoisting it to the
    # top of the file would create a circular import. TODO: confirm and keep
    # this rationale in sync with the package layout.
    import litellm

    return bool(getattr(litellm, "skip_system_message_in_guardrail", False))
|
|
||||||
|
|
||||||
def openai_messages_without_system(
    messages: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Return a new list with every system-role message removed.

    Role matching is case-insensitive. Entries that are ``None``, lack a
    ``role`` key, or carry a falsy role are kept — they are not system
    messages. The input list is never mutated.
    """

    def _is_system(message: Dict[str, Any]) -> bool:
        # Guard against None entries and missing/None roles before comparing.
        role = (message or {}).get("role")
        return str(role or "").lower() == "system"

    return [message for message in messages if not _is_system(message)]
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2,9 +2,17 @@ | |
|
|
||
| import pytest | ||
|
|
||
| import litellm | ||
| from litellm.caching import DualCache | ||
| from litellm.integrations.custom_guardrail import CustomGuardrail | ||
| from litellm.llms.base_llm.guardrail_translation.base_translation import BaseTranslation | ||
| from litellm.llms.base_llm.guardrail_translation.utils import ( | ||
| effective_skip_system_message_for_guardrail, | ||
| openai_messages_without_system, | ||
| ) | ||
| from litellm.llms.openai.chat.guardrail_translation.handler import ( | ||
| OpenAIChatCompletionsHandler, | ||
| ) | ||
| from litellm.llms.base_llm.ocr.transformation import OCRPage, OCRResponse | ||
| from litellm.llms.mistral.ocr.guardrail_translation.handler import OCRHandler | ||
| from litellm.proxy._experimental.mcp_server.guardrail_translation.handler import ( | ||
|
|
@@ -68,6 +76,109 @@ def _inject_mcp_handler_mapping(): | |
|
|
||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
The PR description explicitly states the feature applies to "OpenAI Chat Completions and Anthropic Messages," but the added tests only exercise the OpenAI Chat Completions handler — Anthropic Messages coverage appears to be missing and should be confirmed. |
||
|
|
||
class TestUnifiedLLMGuardrails:
    class TestSkipSystemMessageForChatCompletions:
        """Tests for skipping system messages in unified guardrail inputs."""

        def test_openai_messages_without_system(self):
            """Filtering drops system messages and does not mutate the input."""
            msgs = [
                {"role": "system", "content": "sys"},
                {"role": "user", "content": "hi"},
            ]
            out = openai_messages_without_system(msgs)
            assert len(out) == 1
            assert out[0]["role"] == "user"
            # Original list is untouched.
            assert msgs[0]["content"] == "sys"

        def test_effective_skip_respects_per_guardrail_over_global(self, monkeypatch):
            """A per-guardrail setting overrides the global litellm flag;
            ``None`` falls back to the global flag."""
            monkeypatch.setattr(
                litellm, "skip_system_message_in_guardrail", True, raising=False
            )

            class G:
                skip_system_message_in_guardrail = False

            assert effective_skip_system_message_for_guardrail(G()) is False

            class G2:
                skip_system_message_in_guardrail = None

            assert effective_skip_system_message_for_guardrail(G2()) is True

        @pytest.mark.asyncio
        async def test_openai_handler_skips_system_in_guardrail_inputs(
            self, monkeypatch
        ):
            """With the global flag on, the OpenAI handler excludes system
            messages from guardrail inputs but leaves request data intact."""
            monkeypatch.setattr(
                litellm, "skip_system_message_in_guardrail", True, raising=False
            )

            captured = {}

            class MockGuardrail:
                skip_system_message_in_guardrail = None

                async def apply_guardrail(
                    self, inputs, request_data, input_type, logging_obj=None
                ):
                    captured["inputs"] = inputs
                    return inputs

            data = {
                "messages": [
                    {"role": "system", "content": "secret system"},
                    {"role": "user", "content": "hello"},
                ],
                "model": "gpt-4o",
            }

            handler = OpenAIChatCompletionsHandler()
            await handler.process_input_messages(
                data=data,
                guardrail_to_apply=MockGuardrail(),
                litellm_logging_obj=None,
            )

            # Guardrail only sees non-system content.
            assert captured["inputs"]["texts"] == ["hello"]
            sm = captured["inputs"].get("structured_messages") or []
            assert all(m.get("role") != "system" for m in sm)
            # The outgoing request still carries the system message.
            assert data["messages"][0]["content"] == "secret system"

        @pytest.mark.asyncio
        async def test_openai_handler_per_guardrail_skip_false_overrides_global(
            self, monkeypatch
        ):
            """An explicit per-guardrail ``False`` keeps system messages in
            guardrail inputs even when the global flag is ``True``."""
            monkeypatch.setattr(
                litellm, "skip_system_message_in_guardrail", True, raising=False
            )

            captured = {}

            class MockGuardrail:
                skip_system_message_in_guardrail = False

                async def apply_guardrail(
                    self, inputs, request_data, input_type, logging_obj=None
                ):
                    captured["inputs"] = inputs
                    return inputs

            data = {
                "messages": [
                    {"role": "system", "content": "sys"},
                    {"role": "user", "content": "u"},
                ],
            }

            await OpenAIChatCompletionsHandler().process_input_messages(
                data=data,
                guardrail_to_apply=MockGuardrail(),
                litellm_logging_obj=None,
            )

            assert "sys" in captured["inputs"]["texts"]
            roles = {
                m.get("role")
                for m in (captured["inputs"].get("structured_messages") or [])
            }
            assert "system" in roles
|
|
||
| class TestAsyncPreCallHook: | ||
| @pytest.mark.asyncio | ||
| async def test_uses_mcp_event_type(self): | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.