
Commit da1515a

Add validator module (#61)
1 parent 106a725 commit da1515a

File tree

10 files changed: +515 additions, -4 deletions


CHANGELOG.md

Lines changed: 7 additions & 1 deletion
```diff
@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [1.0.5] - 2025-03-27
+
+- Add `Validator` API
+- Deprecate `response_validation.py` module.
+
 ## [1.0.4] - 2025-03-14
 
 - Pass analytics metadata in headers for all Codex API requests.
@@ -29,7 +34,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - Initial release of the `cleanlab-codex` client library.
 
-[Unreleased]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.4...HEAD
+[Unreleased]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.5...HEAD
+[1.0.5]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.4...v1.0.5
 [1.0.4]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.3...v1.0.4
 [1.0.3]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.2...v1.0.3
 [1.0.2]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.1...v1.0.2
```

pyproject.toml

Lines changed: 1 addition & 0 deletions
```diff
@@ -25,6 +25,7 @@ classifiers = [
     "Programming Language :: Python :: Implementation :: PyPy",
 ]
 dependencies = [
+    "cleanlab-tlm~=1.0.12",
     "codex-sdk==0.1.0a12",
     "pydantic>=2.0.0, <3",
 ]
```
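To confirm the new dependency pin resolves in an installed environment, a minimal sketch using only the standard library (package names taken from the `dependencies` list above):

```python
# Quick sanity check of the pinned dependencies; importlib.metadata is
# part of the Python standard library.
from importlib.metadata import version

print(version("cleanlab-tlm"))  # should satisfy ~=1.0.12
print(version("codex-sdk"))     # should be 0.1.0a12
```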

src/cleanlab_codex/__init__.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -2,5 +2,6 @@
 from cleanlab_codex.client import Client
 from cleanlab_codex.codex_tool import CodexTool
 from cleanlab_codex.project import Project
+from cleanlab_codex.validator import Validator
 
-__all__ = ["Client", "CodexTool", "Project"]
+__all__ = ["Client", "CodexTool", "Project", "Validator"]
```
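With this export in place, `Validator` can be imported from the package root alongside the existing names; a minimal sketch:

```python
# All four public names are now importable from the top-level package.
from cleanlab_codex import Client, CodexTool, Project, Validator
```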
src/cleanlab_codex/internal/validator.py

Lines changed: 53 additions & 0 deletions (new file)

```python
from __future__ import annotations

from typing import TYPE_CHECKING, Any, Optional, Sequence, cast

from cleanlab_tlm.utils.rag import Eval, TrustworthyRAGScore, get_default_evals

from cleanlab_codex.types.validator import ThresholdedTrustworthyRAGScore

if TYPE_CHECKING:
    from cleanlab_codex.validator import BadResponseThresholds


"""Evaluation metrics (excluding trustworthiness) that are used to determine if a response is bad."""
DEFAULT_EVAL_METRICS = ["response_helpfulness"]


def get_default_evaluations() -> list[Eval]:
    """Get the default evaluations for the TrustworthyRAG.

    Note:
        This excludes trustworthiness, which is automatically computed by TrustworthyRAG.
    """
    return [evaluation for evaluation in get_default_evals() if evaluation.name in DEFAULT_EVAL_METRICS]


def get_default_trustworthyrag_config() -> dict[str, Any]:
    """Get the default configuration for the TrustworthyRAG."""
    return {
        "options": {
            "log": ["explanation"],
        },
    }


def update_scores_based_on_thresholds(
    scores: TrustworthyRAGScore | Sequence[TrustworthyRAGScore], thresholds: BadResponseThresholds
) -> ThresholdedTrustworthyRAGScore:
    """Adds an `is_bad` flag to the score dictionaries based on the thresholds."""

    # Helper function to check if a score is bad
    def is_bad(score: Optional[float], threshold: float) -> bool:
        return score is not None and score < threshold

    if isinstance(scores, Sequence):
        raise NotImplementedError("Batching is not supported yet.")

    thresholded_scores = {}
    for eval_name, score_dict in scores.items():
        thresholded_scores[eval_name] = {
            **score_dict,
            "is_bad": is_bad(score_dict["score"], thresholds.get_threshold(eval_name)),
        }
    return cast(ThresholdedTrustworthyRAGScore, thresholded_scores)
```
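To make the thresholding concrete, here is a minimal sketch of `update_scores_based_on_thresholds`; the plain dict stands in for a `TrustworthyRAGScore` result, and all score/threshold values are hypothetical:

```python
# A minimal sketch: the dict mimics the shape of a TrustworthyRAGScore result.
from cleanlab_codex.internal.validator import update_scores_based_on_thresholds
from cleanlab_codex.validator import BadResponseThresholds

thresholds = BadResponseThresholds.model_validate({"trustworthiness": 0.7})
scores = {
    "trustworthiness": {"score": 0.6},       # 0.6 < 0.7, so flagged as bad
    "response_helpfulness": {"score": 0.9},  # 0.9 >= default 0.5, so not bad
}

thresholded = update_scores_based_on_thresholds(scores=scores, thresholds=thresholds)
assert thresholded["trustworthiness"]["is_bad"] is True
assert thresholded["response_helpfulness"]["is_bad"] is False
```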

src/cleanlab_codex/response_validation.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -1,5 +1,7 @@
 """
-Validation functions for evaluating LLM responses and determining if they should be replaced with Codex-generated alternatives.
+This module is now superseded by the [Validator API](/codex/api/validator/).
+
+Deprecated validation functions for evaluating LLM responses and determining if they should be replaced with Codex-generated alternatives.
 """
 
 from __future__ import annotations
```

src/cleanlab_codex/types/response_validation.py

Lines changed: 4 additions & 1 deletion
```diff
@@ -1,4 +1,7 @@
-"""Types for response validation."""
+"""
+This module is now superseded by the [Validator API](/codex/api/validator/).
+
+Deprecated types for response validation."""
 
 from abc import ABC, abstractmethod
 from collections import OrderedDict
```

src/cleanlab_codex/types/validator.py

Lines changed: 35 additions & 0 deletions (new file)

```python
from cleanlab_tlm.utils.rag import EvalMetric


class ThresholdedEvalMetric(EvalMetric):
    is_bad: bool


ThresholdedEvalMetric.__doc__ = f"""
{EvalMetric.__doc__}

is_bad: bool
    Whether the score is below a certain threshold.
"""


class ThresholdedTrustworthyRAGScore(dict[str, ThresholdedEvalMetric]):
    """Object returned by `Validator.detect` containing evaluation scores from [TrustworthyRAGScore](/tlm/api/python/utils.rag/#class-trustworthyragscore)
    along with a boolean flag, `is_bad`, indicating whether the score is below the threshold.

    Example:
        ```python
        {
            "trustworthiness": {
                "score": 0.92,
                "log": {"explanation": "Did not find a reason to doubt trustworthiness."},
                "is_bad": False
            },
            "response_helpfulness": {
                "score": 0.35,
                "is_bad": True
            },
            ...
        }
        ```
    """
```

src/cleanlab_codex/validator.py

Lines changed: 241 additions & 0 deletions (new file)

```python
"""
Detect and remediate bad responses in RAG applications by integrating Codex as-a-Backup.
"""

from __future__ import annotations

from typing import TYPE_CHECKING, Any, Callable, Optional, cast

from cleanlab_tlm import TrustworthyRAG
from pydantic import BaseModel, Field, field_validator

from cleanlab_codex.internal.validator import (
    get_default_evaluations,
    get_default_trustworthyrag_config,
)
from cleanlab_codex.internal.validator import update_scores_based_on_thresholds as _update_scores_based_on_thresholds
from cleanlab_codex.project import Project

if TYPE_CHECKING:
    from cleanlab_codex.types.validator import ThresholdedTrustworthyRAGScore


class Validator:
    def __init__(
        self,
        codex_access_key: str,
        tlm_api_key: Optional[str] = None,
        trustworthy_rag_config: Optional[dict[str, Any]] = None,
        bad_response_thresholds: Optional[dict[str, float]] = None,
    ):
        """Real-time detection and remediation of bad responses in RAG applications, powered by Cleanlab's TrustworthyRAG and Codex.

        This object combines Cleanlab's TrustworthyRAG evaluation scores with configurable thresholds to detect potentially bad responses
        in your RAG application. When a bad response is detected, this Validator automatically attempts to remediate by retrieving an expert-provided
        answer from the Codex Project you've integrated with your RAG app. If no expert answer is available,
        the corresponding query is logged in the Codex Project for SMEs to answer.

        For production, use the `validate()` method, which provides a complete validation workflow including both detection and remediation.
        A `detect()` method is separately available for you to test/tune detection configurations like score thresholds and TrustworthyRAG settings
        without triggering any Codex lookups that could otherwise affect the state of the corresponding Codex Project.

        Args:
            codex_access_key (str): The [access key](/codex/web_tutorials/create_project/#access-keys) for a Codex project. Used to retrieve expert-provided answers
                when bad responses are detected, or otherwise log the corresponding queries for SMEs to answer.

            tlm_api_key (str, optional): API key for accessing [TrustworthyRAG](/tlm/api/python/utils.rag/#class-trustworthyrag). If not provided, this must be specified
                in `trustworthy_rag_config`.

            trustworthy_rag_config (dict[str, Any], optional): Optional initialization arguments for [TrustworthyRAG](/tlm/api/python/utils.rag/#class-trustworthyrag),
                which is used to detect response issues. If not provided, a default configuration will be used.
                By default, this Validator uses the same default configurations as [TrustworthyRAG](/tlm/api/python/utils.rag/#class-trustworthyrag), except:
                - Explanations are returned in logs for better debugging
                - Only the `response_helpfulness` eval is run

            bad_response_thresholds (dict[str, float], optional): Detection score thresholds used to flag whether
                a response is bad or not. Each key corresponds to an Eval from [TrustworthyRAG](/tlm/api/python/utils.rag/#class-trustworthyrag),
                and the value indicates a threshold (between 0 and 1) below which Eval scores are treated as detected issues. A response
                is flagged as bad if any issues are detected. If not provided, default thresholds will be used. See
                [`BadResponseThresholds`](/codex/api/python/validator/#class-badresponsethresholds) for more details.

        Raises:
            ValueError: If both tlm_api_key and api_key in trustworthy_rag_config are provided.
            ValueError: If bad_response_thresholds contains thresholds for non-existent evaluation metrics.
            TypeError: If any threshold value is not a number.
            ValueError: If any threshold value is not between 0 and 1.
        """
        trustworthy_rag_config = trustworthy_rag_config or get_default_trustworthyrag_config()
        if tlm_api_key is not None and "api_key" in trustworthy_rag_config:
            error_msg = "Cannot specify both tlm_api_key and api_key in trustworthy_rag_config"
            raise ValueError(error_msg)
        if tlm_api_key is not None:
            trustworthy_rag_config["api_key"] = tlm_api_key

        self._project: Project = Project.from_access_key(access_key=codex_access_key)

        trustworthy_rag_config.setdefault("evals", get_default_evaluations())
        self._tlm_rag = TrustworthyRAG(**trustworthy_rag_config)

        # Validate that all the necessary thresholds are present in the TrustworthyRAG.
        _evals = [e.name for e in self._tlm_rag.get_evals()] + ["trustworthiness"]

        self._bad_response_thresholds = BadResponseThresholds.model_validate(bad_response_thresholds or {})

        _threshold_keys = self._bad_response_thresholds.model_dump().keys()

        # Check if there are any thresholds without corresponding evals (this is an error)
        _extra_thresholds = set(_threshold_keys) - set(_evals)
        if _extra_thresholds:
            error_msg = f"Found thresholds for non-existent evaluation metrics: {_extra_thresholds}"
            raise ValueError(error_msg)

    def validate(
        self,
        query: str,
        context: str,
        response: str,
        prompt: Optional[str] = None,
        form_prompt: Optional[Callable[[str, str], str]] = None,
    ) -> dict[str, Any]:
        """Evaluate whether the AI-generated response is bad, and if so, request an alternate expert answer.
        If no expert answer is available, this query is still logged for SMEs to answer.

        Args:
            query (str): The user query that was used to generate the response.
            context (str): The context that was retrieved from the RAG Knowledge Base and used to generate the response.
            response (str): A response from your LLM/RAG system.
            prompt (str, optional): Optional prompt representing the actual inputs (combining query, context, and system instructions into one string) to the LLM that generated the response.
            form_prompt (Callable[[str, str], str], optional): Optional function to format the prompt based on query and context. Cannot be provided together with prompt; provide one or the other. This function should take query and context as parameters and return a formatted prompt string. If not provided, a default prompt formatter will be used. To include a system prompt or any other special instructions for your LLM, incorporate them directly in your custom form_prompt() function definition.

        Returns:
            dict[str, Any]: A dictionary containing:
                - 'expert_answer': Alternate SME-provided answer from Codex if the response was flagged as bad and an answer was found in the Codex Project, or None otherwise.
                - 'is_bad_response': True if the response is flagged as potentially bad, False otherwise. When True, a Codex lookup is performed, which logs this query into the Codex Project for SMEs to answer.
                - Additional keys from a [`ThresholdedTrustworthyRAGScore`](/cleanlab_codex/types/validator/#class-thresholdedtrustworthyragscore) dictionary: each corresponds to a [TrustworthyRAG](/tlm/api/python/utils.rag/#class-trustworthyrag) evaluation metric, and points to the score for this evaluation as well as a boolean `is_bad` flagging whether the score falls below the corresponding threshold.
        """
        scores, is_bad_response = self.detect(query, context, response, prompt, form_prompt)
        expert_answer = None
        if is_bad_response:
            expert_answer = self._remediate(query)

        return {
            "expert_answer": expert_answer,
            "is_bad_response": is_bad_response,
            **scores,
        }

    def detect(
        self,
        query: str,
        context: str,
        response: str,
        prompt: Optional[str] = None,
        form_prompt: Optional[Callable[[str, str], str]] = None,
    ) -> tuple[ThresholdedTrustworthyRAGScore, bool]:
        """Score response quality using TrustworthyRAG and flag bad responses based on configured thresholds.

        Note:
            Use this method instead of `validate()` to test/tune detection configurations like score thresholds and TrustworthyRAG settings.
            This `detect()` method will not affect your Codex Project, whereas `validate()` will log queries whose response was detected as bad into the Codex Project and is thus only suitable for production, not testing.
            Both this method and `validate()` rely on the same detection logic, so you can use this method to first optimize detections and then switch to using `validate()`.

        Args:
            query (str): The user query that was used to generate the response.
            context (str): The context that was retrieved from the RAG Knowledge Base and used to generate the response.
            response (str): A response from your LLM/RAG system.
            prompt (str, optional): Optional prompt representing the actual inputs (combining query, context, and system instructions into one string) to the LLM that generated the response.
            form_prompt (Callable[[str, str], str], optional): Optional function to format the prompt based on query and context. Cannot be provided together with prompt; provide one or the other. This function should take query and context as parameters and return a formatted prompt string. If not provided, a default prompt formatter will be used. To include a system prompt or any other special instructions for your LLM, incorporate them directly in your custom form_prompt() function definition.

        Returns:
            tuple[ThresholdedTrustworthyRAGScore, bool]: A tuple containing:
                - ThresholdedTrustworthyRAGScore: Quality scores for different evaluation metrics like trustworthiness
                  and response helpfulness. Each metric has a score between 0 and 1, as well as a boolean flag, `is_bad`, indicating whether the score is below a given threshold.
                - bool: True if the response is determined to be bad based on the evaluation scores
                  and configured thresholds, False otherwise.
        """
        scores = self._tlm_rag.score(
            response=response,
            query=query,
            context=context,
            prompt=prompt,
            form_prompt=form_prompt,
        )

        thresholded_scores = _update_scores_based_on_thresholds(
            scores=scores,
            thresholds=self._bad_response_thresholds,
        )

        is_bad_response = any(score_dict["is_bad"] for score_dict in thresholded_scores.values())
        return thresholded_scores, is_bad_response

    def _remediate(self, query: str) -> str | None:
        """Request an SME-provided answer for this query, if one is available in Codex.

        Args:
            query (str): The user's original query to get an SME-provided answer for.

        Returns:
            str | None: The SME-provided answer from Codex, or None if no answer could be found in the Codex Project.
        """
        codex_answer, _ = self._project.query(question=query)
        return codex_answer


class BadResponseThresholds(BaseModel):
    """Config for determining if a response is bad.
    Each key is an evaluation metric and the value is a threshold such that a response is considered bad whenever the corresponding evaluation score falls below the threshold.

    Default Thresholds:
        - trustworthiness: 0.5
        - response_helpfulness: 0.5
        - Any custom eval: 0.5 (if not explicitly specified in bad_response_thresholds)
    """

    trustworthiness: float = Field(
        description="Threshold for trustworthiness.",
        default=0.5,
        ge=0.0,
        le=1.0,
    )
    response_helpfulness: float = Field(
        description="Threshold for response helpfulness.",
        default=0.5,
        ge=0.0,
        le=1.0,
    )

    @property
    def default_threshold(self) -> float:
        """The default threshold to use when an evaluation metric's threshold is not specified. This threshold is set to 0.5."""
        return 0.5

    def get_threshold(self, eval_name: str) -> float:
        """Get the threshold for an eval, if it exists.

        For fields defined in the model, returns their value (which may be the field's default).
        For custom evals not defined in the model, returns the default threshold value (see `default_threshold`).
        """

        # For fields defined in the model, use their value (which may be the field's default)
        if eval_name in self.model_fields:
            return cast(float, getattr(self, eval_name))

        # For custom evals, use the default threshold
        return getattr(self, eval_name, self.default_threshold)

    @field_validator("*")
    @classmethod
    def validate_threshold(cls, v: Any) -> float:
        """Validate that all fields (including dynamic ones) are floats between 0 and 1."""
        if not isinstance(v, (int, float)):
            error_msg = f"Threshold must be a number, got {type(v)}"
            raise TypeError(error_msg)
        if not 0 <= float(v) <= 1:
            error_msg = f"Threshold must be between 0 and 1, got {v}"
            raise ValueError(error_msg)
        return float(v)

    model_config = {
        "extra": "allow"  # Allow additional fields for custom eval thresholds
    }
```
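Putting it together, a usage sketch based on the docstrings above; the access key, API key, and inputs are placeholders:

```python
# A usage sketch; all credentials and inputs below are placeholders.
from cleanlab_codex import Validator

validator = Validator(
    codex_access_key="<codex-access-key>",
    tlm_api_key="<tlm-api-key>",
    bad_response_thresholds={"trustworthiness": 0.6},  # optional override
)

# Production path: detection plus remediation via Codex.
result = validator.validate(
    query="How do I enable two-factor authentication?",
    context="<context retrieved from your knowledge base>",
    response="<response generated by your LLM>",
)
if result["is_bad_response"] and result["expert_answer"] is not None:
    final_response = result["expert_answer"]  # serve the SME answer instead

# Tuning path: detection only, with no Codex lookups or logging.
scores, is_bad = validator.detect(
    query="How do I enable two-factor authentication?",
    context="<context retrieved from your knowledge base>",
    response="<response generated by your LLM>",
)
```

As the docstrings note, `detect()` suits offline threshold tuning, while `validate()` performs Codex lookups and logging and is meant for production.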
