forked from strands-agents/evals
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_output_evaluator.py
More file actions
261 lines (193 loc) · 9.63 KB
/
test_output_evaluator.py
File metadata and controls
261 lines (193 loc) · 9.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
from unittest.mock import AsyncMock, Mock, patch

import pytest

from strands_evals.evaluators import OutputEvaluator
from strands_evals.types import EnvironmentState, EvaluationData, EvaluationOutput
@pytest.fixture
def mock_agent():
    """Synchronous Agent stand-in whose call returns a fixed EvaluationOutput."""
    result = Mock()
    result.structured_output = EvaluationOutput(score=0.8, test_pass=True, reason="Mock evaluation result")
    # Mock(return_value=...) makes the agent itself callable and call-recording.
    return Mock(return_value=result)
@pytest.fixture
def mock_async_agent():
    """Agent stand-in whose ``invoke_async`` is awaitable AND records calls.

    The previous version attached a hand-rolled ``async def`` coroutine, which
    meant ``invoke_async.call_args`` / ``assert_called_once`` were unavailable,
    unlike the synchronous ``mock_agent`` fixture. ``unittest.mock.AsyncMock``
    (stdlib, Python 3.8+) is awaitable and keeps full call introspection.
    """
    mock_result = Mock()
    mock_result.structured_output = EvaluationOutput(
        score=0.8, test_pass=True, reason="Mock async evaluation result"
    )
    agent = Mock()
    # Awaiting agent.invoke_async(...) now yields mock_result and records the call.
    agent.invoke_async = AsyncMock(return_value=mock_result)
    return agent
@pytest.fixture
def evaluation_data():
    """Minimal EvaluationData record for a simple arithmetic check."""
    data = EvaluationData(input="What is 2+2?", actual_output="4", expected_output="4", name="math_test")
    return data
def test_output_evaluator__init__with_defaults():
    """Constructing with only a rubric falls back to sensible defaults."""
    evaluator = OutputEvaluator(rubric="Test rubric")

    assert evaluator.rubric == "Test rubric"
    assert evaluator.model is None
    assert evaluator.system_prompt is not None  # default template is applied
    assert evaluator.include_inputs is True
def test_output_evaluator__init__with_custom_values():
    """Every constructor argument should be stored verbatim on the instance."""
    prompt = "Custom system prompt"
    evaluator = OutputEvaluator(
        rubric="Custom rubric",
        model="gpt-4",
        system_prompt=prompt,
        include_inputs=False,
    )

    assert evaluator.system_prompt == prompt
    assert evaluator.rubric == "Custom rubric"
    assert evaluator.model == "gpt-4"
    assert evaluator.include_inputs is False
@patch("strands_evals.evaluators.output_evaluator.Agent")
def test_output_evaluator_evaluate_with_inputs(mock_agent_class, evaluation_data, mock_agent):
    """Default evaluation embeds the input in the prompt but never any trajectory."""
    mock_agent_class.return_value = mock_agent
    evaluator = OutputEvaluator(rubric="Test rubric")

    result = evaluator.evaluate(evaluation_data)

    # The judge Agent must be built exactly once, with the evaluator's own prompt.
    mock_agent_class.assert_called_once_with(model=None, system_prompt=evaluator.system_prompt, callback_handler=None)
    mock_agent.assert_called_once()

    args, kwargs = mock_agent.call_args
    prompt = args[0]
    assert kwargs["structured_output_model"] == EvaluationOutput
    for fragment in (
        "<Input>What is 2+2?</Input>",
        "<Output>4</Output>",
        "<ExpectedOutput>4</ExpectedOutput>",
        "<Rubric>Test rubric</Rubric>",
    ):
        assert fragment in prompt
    for absent in ("<Trajectory>", "<ExpectedTrajectory>"):
        assert absent not in prompt

    assert len(result) == 1
    assert result[0].score == 0.8
    assert result[0].test_pass is True
@patch("strands_evals.evaluators.output_evaluator.Agent")
def test_output_evaluator_evaluate_without_inputs(mock_agent_class, evaluation_data, mock_agent):
    """With include_inputs=False the prompt omits the input and trajectory sections."""
    mock_agent_class.return_value = mock_agent
    evaluator = OutputEvaluator(rubric="Test rubric", include_inputs=False)

    result = evaluator.evaluate(evaluation_data)

    prompt = mock_agent.call_args[0][0]
    for absent in ("<Input>", "<Trajectory>", "<ExpectedTrajectory>"):
        assert absent not in prompt
    assert "<Output>4</Output>" in prompt
    assert "<ExpectedOutput>4</ExpectedOutput>" in prompt
    assert "<Rubric>Test rubric</Rubric>" in prompt

    assert len(result) == 1
    assert result[0].score == 0.8
    assert result[0].test_pass is True
@patch("strands_evals.evaluators.output_evaluator.Agent")
def test_output_evaluator_evaluate_without_expected_output(mock_agent_class, mock_agent):
    """The ExpectedOutput section is dropped when no expected output is supplied."""
    mock_agent_class.return_value = mock_agent
    data = EvaluationData(input="test", actual_output="result")
    evaluator = OutputEvaluator(rubric="Test rubric")

    evaluator.evaluate(data)

    prompt = mock_agent.call_args[0][0]
    assert "<Output>result</Output>" in prompt
    assert "<ExpectedOutput>" not in prompt
def test_output_evaluator_evaluate_missing_actual_output():
    """Evaluating a record with no actual_output must raise a helpful error."""
    data = EvaluationData(input="test", expected_output="expected")
    evaluator = OutputEvaluator(rubric="Test rubric")

    with pytest.raises(Exception, match="Please make sure the task function return the output"):
        evaluator.evaluate(data)
@pytest.mark.asyncio
@patch("strands_evals.evaluators.output_evaluator.Agent")
async def test_output_evaluator_evaluate_async_with_inputs(mock_agent_class, evaluation_data, mock_async_agent):
    """Async evaluation builds the judge Agent once and returns its structured output."""
    mock_agent_class.return_value = mock_async_agent
    evaluator = OutputEvaluator(rubric="Test rubric")

    result = await evaluator.evaluate_async(evaluation_data)

    mock_agent_class.assert_called_once_with(model=None, system_prompt=evaluator.system_prompt, callback_handler=None)
    assert len(result) == 1
    first = result[0]
    assert first.score == 0.8
    assert first.test_pass is True
    assert first.reason == "Mock async evaluation result"
@pytest.mark.asyncio
@patch("strands_evals.evaluators.output_evaluator.Agent")
async def test_output_evaluator_evaluate_async_without_inputs(mock_agent_class, evaluation_data, mock_async_agent):
    """Async evaluation also succeeds when inputs are excluded from the prompt."""
    mock_agent_class.return_value = mock_async_agent
    evaluator = OutputEvaluator(rubric="Test rubric", include_inputs=False)

    result = await evaluator.evaluate_async(evaluation_data)

    assert len(result) == 1
    assert result[0].test_pass is True
    assert result[0].score == 0.8
@pytest.mark.asyncio
async def test_output_evaluator_evaluate_async_missing_actual_output():
    """Async evaluation without actual_output raises the same helpful error as sync."""
    data = EvaluationData(input="test", expected_output="expected")
    evaluator = OutputEvaluator(rubric="Test rubric")

    with pytest.raises(Exception, match="Please make sure the task function return the output"):
        await evaluator.evaluate_async(data)
def test_output_evaluator_default_system_prompt_mentions_environment_state():
    """The default system prompt documents both environment-state sections."""
    evaluator = OutputEvaluator(rubric="Test rubric")
    for tag in ("<ActualEnvironmentState>", "<ExpectedEnvironmentState>"):
        assert tag in evaluator.system_prompt
def test_output_evaluator__init__uses_environment_state_defaults_false():
    """Environment-state mode is opt-in and defaults to False."""
    assert OutputEvaluator(rubric="Test rubric").uses_environment_state is False
@patch("strands_evals.evaluators.output_evaluator.Agent")
def test_output_evaluator_evaluate_includes_environment_state_in_prompt(mock_agent_class, mock_agent):
    """With uses_environment_state=True both env-state sections appear in the prompt."""
    mock_agent_class.return_value = mock_agent
    data = EvaluationData(
        input="test",
        actual_environment_state=[EnvironmentState(name="db", state={"rows": 5})],
        expected_environment_state=[EnvironmentState(name="db", state={"rows": 5})],
    )
    evaluator = OutputEvaluator(rubric="Test rubric", uses_environment_state=True)

    evaluator.evaluate(data)

    prompt = mock_agent.call_args[0][0]
    for tag in ("<ActualEnvironmentState>", "<ExpectedEnvironmentState>"):
        assert tag in prompt
@patch("strands_evals.evaluators.output_evaluator.Agent")
def test_output_evaluator_evaluate_environment_state_does_not_require_actual_output(mock_agent_class, mock_agent):
    """In environment-state mode a missing actual_output is acceptable."""
    mock_agent_class.return_value = mock_agent
    data = EvaluationData(
        input="test",
        actual_environment_state=[EnvironmentState(name="db", state={"rows": 5})],
    )
    evaluator = OutputEvaluator(rubric="Test rubric", uses_environment_state=True)

    # Must not raise even though actual_output is None.
    evaluator.evaluate(data)
def test_output_evaluator_evaluate_environment_state_raises_without_actual_state():
    """Environment-state mode requires an actual environment state to evaluate."""
    evaluator = OutputEvaluator(rubric="Test rubric", uses_environment_state=True)
    with pytest.raises(Exception, match="environment_state"):
        evaluator.evaluate(EvaluationData(input="test"))
@patch("strands_evals.evaluators.output_evaluator.Agent")
def test_output_evaluator_evaluate_excludes_environment_state_by_default(mock_agent_class, mock_agent):
    """Without opting in, environment state never reaches the judge prompt."""
    mock_agent_class.return_value = mock_agent
    data = EvaluationData(
        input="test",
        actual_output="result",
        actual_environment_state=[EnvironmentState(name="db", state={"rows": 5})],
    )
    evaluator = OutputEvaluator(rubric="Test rubric")

    evaluator.evaluate(data)

    assert "<ActualEnvironmentState>" not in mock_agent.call_args[0][0]
@pytest.mark.asyncio
@patch("strands_evals.evaluators.output_evaluator.Agent")
async def test_output_evaluator_evaluate_async_includes_environment_state(mock_agent_class, mock_async_agent):
    """Async environment-state evaluation completes and yields a passing result."""
    mock_agent_class.return_value = mock_async_agent
    data = EvaluationData(
        input="test",
        actual_environment_state=[EnvironmentState(name="db", state={"rows": 5})],
    )
    evaluator = OutputEvaluator(rubric="Test rubric", uses_environment_state=True)

    result = await evaluator.evaluate_async(data)

    assert len(result) == 1
    assert result[0].test_pass is True