#!/usr/bin/env python3
"""
Check test status and count individual test case failures.

This script runs all tests and counts individual test case failures
(not just the number of failing test functions), and also checks whether
the performance tests pass.
"""
import ast
import os
import re
import subprocess
import sys

# Baseline number of individual test case failures; runs at or below this
# count (with passing performance tests) exit with status 0.
EXPECTED_FAILURES = 65
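
# Overall flow (a summary comment, not enforced anywhere in the code):
# run_tests() asks the suite to record per-case failures in a structured log
# via the SINONYM_FAIL_LOG environment variable -- the test suite is assumed
# to honor that variable -- and main() prefers that log, falling back to
# regex-scraping aggregated "<name>: N failures out of M tests" lines from
# the raw pytest output.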


def run_tests():
    """Run all non-performance tests and return their combined output."""
    env = os.environ.copy()
    env["PYTHONHASHSEED"] = "0"
    # Ensure UTF-8 encoding on all platforms
    env["PYTHONIOENCODING"] = "utf-8"
    try:
        # Prepare the failure log path for this run
        fail_log = os.path.join(os.getcwd(), ".pytest_failures.txt")
        # Clear the previous log if it exists
        try:
            if os.path.exists(fail_log):
                os.remove(fail_log)
        except Exception:
            pass
        env["SINONYM_FAIL_LOG"] = fail_log
        result = subprocess.run(
            [
                "uv",
                "run",
                "pytest",
                "-q",
                "-s",
                "tests/",
                "--ignore=tests/test_performance.py",
                "--disable-warnings",
                "--maxfail=0",
            ],
            check=False,
            capture_output=True,
            text=True,
            encoding="utf-8",
            env=env,
            timeout=300,
        )
        # Append the log path to the combined output for downstream consumers
        combined = result.stdout + result.stderr
        if os.path.exists(fail_log):
            combined += f"\n__FAIL_LOG_PATH__={fail_log}\n"
        return combined
    except subprocess.TimeoutExpired:
        print("Tests timed out after 300 seconds")
        return ""
    except Exception as e:
        print(f"Error running tests: {e}")
        return ""


def extract_failure_counts(output):
    """Extract individual test case failure counts from test output."""
    # Match both with and without a leading 'AssertionError:' prefix
    pattern_with_assert = re.compile(
        r"AssertionError:\s*([^:]+):\s*(\d+)\s+failures?\s+out\s+of\s+(\d+)\s+tests?",
    )
    pattern_plain = re.compile(
        r"([^:]+):\s*(\d+)\s+failures?\s+out\s+of\s+(\d+)\s+tests?",
    )
    failure_details = {}  # Use a dict to deduplicate by test name
    # Split the output into lines and process each one
    for line in output.split("\n"):
        match = None
        if "AssertionError:" in line:
            match = pattern_with_assert.search(line)
        if not match:
            match = pattern_plain.search(line)
        if match:
            test_name, failures, total = match.groups()
            test_name = test_name.strip()
            # Keying by test name automatically deduplicates repeated lines
            failure_details[test_name] = {
                "name": test_name,
                "failures": int(failures),
                "total": int(total),
            }
    return list(failure_details.values())
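
# A hypothetical line these patterns are meant to match (illustrative only;
# the test name below is made up, not taken from the real suite):
#   "AssertionError: compound_given_names: 3 failures out of 120 tests"
# which would yield {"name": "compound_given_names", "failures": 3, "total": 120}.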


def read_fail_log_path_from_output(output: str) -> str | None:
    """Return the fail log path advertised in the combined output, if any."""
    for line in output.splitlines():
        if line.strip().startswith("__FAIL_LOG_PATH__="):
            return line.strip().split("=", 1)[1]
    return None


def read_fail_log(path: str) -> list[str]:
    """Read the fail log, one entry per line; return [] on any error."""
    try:
        with open(path, encoding="utf-8") as f:
            return [ln.rstrip("\n") for ln in f]
    except Exception:
        return []
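
# Assumed fail-log record format (inferred from the parsing in main(), not
# from a published spec): one failure per line, six tab-separated fields:
#   label \t repr(name) \t expected_success \t expected_output \t actual_success \t actual_output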


def check_performance_tests():
    """Run performance tests separately and check whether they pass."""
    env = os.environ.copy()
    env["PYTHONHASHSEED"] = "0"
    # Ensure UTF-8 encoding on all platforms
    env["PYTHONIOENCODING"] = "utf-8"
    try:
        result = subprocess.run(
            ["uv", "run", "pytest", "tests/test_performance.py", "-v"],
            check=False,
            capture_output=True,
            text=True,
            encoding="utf-8",
            env=env,
            timeout=30,
        )
        # Treat the run as passing only if the -v output reports at least one
        # PASSED test and no FAILED tests (simple string matching)
        if "PASSED" in result.stdout and "FAILED" not in result.stdout:
            return True, result.stdout
        return False, result.stdout + result.stderr
    except Exception as e:
        return False, str(e)


def main():
    print("=" * 70)
    print("SINONYM TEST STATUS CHECKER")
    print("=" * 70)

    # Run all tests
    print("\nRunning all tests...")
    output = run_tests()

    # Prefer the explicit failure log if available
    fail_log_path = read_fail_log_path_from_output(output)
    logged = read_fail_log(fail_log_path) if fail_log_path else []
    failures = extract_failure_counts(output) if not logged else []

    if logged:
        # Summarize by label from the log entries
        by_label: dict[str, int] = {}
        parsed_entries: list[dict] = []
        for ln in logged:
            parts = ln.split("\t")
            # Skip malformed lines; see the record format noted after read_fail_log()
            if len(parts) != 6:
                continue
            label, name_repr, expected_success, expected_output, actual_success, actual_output = parts
            by_label[label] = by_label.get(label, 0) + 1
            # Convert repr(name) back to a string if possible
            try:
                name = ast.literal_eval(name_repr)
            except Exception:
                name = name_repr
            parsed_entries.append(
                {
                    "label": label,
                    "name": name,
                    "expected_success": expected_success,
                    "expected_output": expected_output,
                    "actual_success": actual_success,
                    "actual_output": actual_output,
                },
            )

        print("\n" + "=" * 70)
        print("INDIVIDUAL TEST CASE FAILURES (aggregated counts):")
        print("=" * 70)
        total_failures = 0
        for label in sorted(by_label):
            count = by_label[label]
            print(f" {label}: {count} failures")
            total_failures += count
        print("-" * 70)
        print(f"TOTAL INDIVIDUAL TEST CASE FAILURES: {total_failures}")
        # Cross-check the aggregated total against the raw log length
        if total_failures != len(logged):
            print("WARNING: Aggregated count does not match raw log line count!")
            print(f" Aggregated: {total_failures} vs Raw: {len(logged)}")
        print("=" * 70)

        # Print a detailed list of every failing case from the log
        print("\n" + "=" * 70)
        print("DETAILED FAILURES (from fail log):")
        print("=" * 70)
        for i, e in enumerate(parsed_entries, start=1):
            print(
                f"{i:4d}. [{e['label']}] {e['name']} | "
                f"expected_success={e['expected_success']} actual_success={e['actual_success']} | "
                f"expected={e['expected_output']} | actual={e['actual_output']}",
            )
        print("=" * 70)
    elif failures:
        print("\n" + "=" * 70)
        print("INDIVIDUAL TEST CASE FAILURES (aggregated counts):")
        print("=" * 70)
        total_failures = 0
        for failure in failures:
            print(f" {failure['name']}: {failure['failures']}/{failure['total']} failures")
            total_failures += failure["failures"]
        print("-" * 70)
        print(f"TOTAL INDIVIDUAL TEST CASE FAILURES: {total_failures}")
        print("=" * 70)
    else:
        print("\nNo aggregated failure counts found.")
        print("(This might mean all tests passed or the output format changed)")

    # Check performance tests
    print("\n" + "=" * 70)
    print("PERFORMANCE TEST STATUS:")
    print("=" * 70)
    perf_passed, perf_output = check_performance_tests()
    if perf_passed:
        print("✓ Performance tests PASSED")
        # Try to extract performance metrics
        if "microseconds per name" in perf_output:
            lines = perf_output.split("\n")
            for line in lines:
                if "microseconds per name" in line or "names/second" in line:
                    print(f" {line.strip()}")
    else:
        print("✗ Performance tests FAILED")
        print("Performance test output:")
        print(perf_output)

    # Final summary
    print("\n" + "=" * 70)
    print("SUMMARY:")
    print("=" * 70)
    if logged:
        print(f"Individual test case failures: {len(logged)}")
    elif failures:
        print(f"Individual test case failures: {total_failures}")
    else:
        print("Individual test case failures: Unable to determine")
    print(f"Performance tests: {'PASSED ✓' if perf_passed else 'FAILED ✗'}")

    # Exit code based on status (baseline updated to EXPECTED_FAILURES after config improvements)
    if logged and len(logged) == EXPECTED_FAILURES and perf_passed:
        print(f"\n✓ Tests are at expected baseline ({EXPECTED_FAILURES} failures, performance OK)")
        sys.exit(0)
    elif logged and len(logged) < EXPECTED_FAILURES and perf_passed:
        print(
            f"\n✓ IMPROVEMENT! Tests are better than baseline ({len(logged)} < {EXPECTED_FAILURES} failures, performance OK)",
        )
        sys.exit(0)
    elif logged and len(logged) > EXPECTED_FAILURES:
        print(f"\n✗ REGRESSION! Too many failures ({len(logged)} > {EXPECTED_FAILURES})")
        sys.exit(1)
    elif failures and total_failures == EXPECTED_FAILURES and perf_passed:
        print(f"\n✓ Tests are at expected baseline ({EXPECTED_FAILURES} failures, performance OK)")
        sys.exit(0)
    elif failures and total_failures < EXPECTED_FAILURES and perf_passed:
        print(
            f"\n✓ IMPROVEMENT! Tests are better than baseline ({total_failures} < {EXPECTED_FAILURES} failures, performance OK)",
        )
        sys.exit(0)
    elif failures and total_failures > EXPECTED_FAILURES:
        print(f"\n✗ REGRESSION! Too many failures ({total_failures} > {EXPECTED_FAILURES})")
        sys.exit(1)
    elif not perf_passed:
        print("\n✗ Performance tests failed!")
        sys.exit(1)
    else:
        print("\n⚠ Unable to determine test status")
        sys.exit(0)


if __name__ == "__main__":
    main()
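
# Example invocation (a sketch; assumes uv and the dev dependencies are
# installed -- a plain "python check_test_status.py" should work as well):
#
#   uv run python check_test_status.py
#
# Exit codes: 0 when failures are at or below the EXPECTED_FAILURES baseline
# (with performance tests passing) or when status cannot be determined;
# 1 on a regression above the baseline or on failing performance tests.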