
Commit d3048ce

Format QAEvaluator changes with black
1 parent 64d8be6 commit d3048ce

File tree

2 files changed: +31 −11 lines

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_qa/_qa.py

Lines changed: 25 additions & 5 deletions
@@ -105,11 +105,31 @@ def __init__(
         is_reasoning_model = kwargs.get("is_reasoning_model", False)
 
         evaluators = [
-            GroundednessEvaluator(model_config, threshold=groundedness_threshold, is_reasoning_model=is_reasoning_model),
-            RelevanceEvaluator(model_config, threshold=relevance_threshold, is_reasoning_model=is_reasoning_model),
-            CoherenceEvaluator(model_config, threshold=coherence_threshold, is_reasoning_model=is_reasoning_model),
-            FluencyEvaluator(model_config, threshold=fluency_threshold, is_reasoning_model=is_reasoning_model),
-            SimilarityEvaluator(model_config, threshold=similarity_threshold, is_reasoning_model=is_reasoning_model),
+            GroundednessEvaluator(
+                model_config,
+                threshold=groundedness_threshold,
+                is_reasoning_model=is_reasoning_model,
+            ),
+            RelevanceEvaluator(
+                model_config,
+                threshold=relevance_threshold,
+                is_reasoning_model=is_reasoning_model,
+            ),
+            CoherenceEvaluator(
+                model_config,
+                threshold=coherence_threshold,
+                is_reasoning_model=is_reasoning_model,
+            ),
+            FluencyEvaluator(
+                model_config,
+                threshold=fluency_threshold,
+                is_reasoning_model=is_reasoning_model,
+            ),
+            SimilarityEvaluator(
+                model_config,
+                threshold=similarity_threshold,
+                is_reasoning_model=is_reasoning_model,
+            ),
             F1ScoreEvaluator(threshold=f1_score_threshold),
         ]
         super().__init__(evaluators=evaluators, **kwargs)
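For context, a minimal usage sketch of the constructor being reformatted. It assumes QAEvaluator is imported from the azure.ai.evaluation package root and that model_config follows the usual Azure OpenAI configuration shape; the endpoint, deployment, and key values below are placeholders.

from azure.ai.evaluation import QAEvaluator

# Placeholder model configuration; substitute real values for your Azure OpenAI resource.
model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",
    "azure_deployment": "<deployment-name>",
    "api_key": "<api-key>",
}

# is_reasoning_model is read from kwargs and forwarded to every LLM-based
# sub-evaluator (groundedness, relevance, coherence, fluency, similarity);
# F1ScoreEvaluator does not use an LLM and does not take it.
qa_evaluator = QAEvaluator(model_config, is_reasoning_model=True)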

sdk/evaluation/azure-ai-evaluation/tests/unittests/test_qa_evaluator.py

Lines changed: 6 additions & 6 deletions
@@ -13,9 +13,9 @@ def test_is_reasoning_model_passed_to_sub_evaluators(self, mock_model_config):
         for evaluator in qa_evaluator._evaluators:
             # F1ScoreEvaluator doesn't use LLM, so it doesn't have _is_reasoning_model
             if hasattr(evaluator, "_is_reasoning_model"):
-                assert evaluator._is_reasoning_model is True, (
-                    f"{type(evaluator).__name__} did not receive is_reasoning_model=True"
-                )
+                assert (
+                    evaluator._is_reasoning_model is True
+                ), f"{type(evaluator).__name__} did not receive is_reasoning_model=True"
 
     def test_is_reasoning_model_defaults_to_false(self, mock_model_config):
         """Test that is_reasoning_model defaults to False for sub-evaluators"""
@@ -24,6 +24,6 @@ def test_is_reasoning_model_defaults_to_false(self, mock_model_config):
         # Verify that all LLM-based sub-evaluators have is_reasoning_model=False
         for evaluator in qa_evaluator._evaluators:
             if hasattr(evaluator, "_is_reasoning_model"):
-                assert evaluator._is_reasoning_model is False, (
-                    f"{type(evaluator).__name__} did not default to is_reasoning_model=False"
-                )
+                assert (
+                    evaluator._is_reasoning_model is False
+                ), f"{type(evaluator).__name__} did not default to is_reasoning_model=False"
