Skip to content

Commit dda9acc

Browse files
batch error logs (#484)
Category: feature. JIRA issue: MIC-5503. Adds `with check` blocks (pytest-check) so that multiple assertion failures are batched into the error logs instead of stopping at the first failure. Testing: added multiple assert statements that would fail inside `with check` blocks and verified that all of them appeared in the output logs.
1 parent 2cc72b1 commit dda9acc

File tree

6 files changed

+29
-17
lines changed

6 files changed

+29
-17
lines changed

.readthedocs.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,4 +20,4 @@ python:
2020
- docs
2121

2222
formats:
23-
- pdf
23+
- pdf

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,5 +23,6 @@ exclude = [
2323
[[tool.mypy.overrides]]
2424
module = [
2525
"scipy.*",
26+
"pytest_check",
2627
]
2728
ignore_missing_imports = true

setup.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@
4949
"tqdm",
5050
"layered_config_tree>=2.1.0",
5151
"loguru",
52+
"pytest_check",
5253
# type stubs
5354
"pandas-stubs",
5455
"types-PyYAML",

tests/integration/release/test_release.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
import pandas as pd
66
import pytest
77
from _pytest.fixtures import FixtureRequest
8+
from pytest_check import check
89
from vivarium_testing_utils import FuzzyChecker
910

1011
from pseudopeople.dataset import Dataset

tests/integration/release/test_runner.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -8,23 +8,22 @@
88
@pytest.mark.parametrize(
99
"pytest_args",
1010
[
11-
#([]),
11+
([]),
1212
(["--dataset", "acs"]),
13-
#(["--dataset", "cps"]),
13+
(["--dataset", "cps"]),
1414
# (["--dataset", "acs", "--population", "USA"]),
1515
# (["--dataset", "acs", "--population", "USA", "--state", "RI"]),
16-
#(["--dataset", "wic", "--year", "2015"]),
16+
(["--dataset", "wic", "--year", "2015"]),
1717
# (["--dataset", "wic", "--population", "USA", "--state", "RI", "--year", "2015"]),
1818
],
19-
#ids=["1", "2", "3", "4"],
20-
ids = ['1'],
19+
ids=["1", "2", "3", "4"],
2120
)
2221
def test_release_tests(
2322
pytest_args: list[str], release_output_dir: Path, request: pytest.FixtureRequest
2423
) -> None:
2524
os.chdir(Path(__file__).parent) # need this to access cli options from conftest.py
26-
base_cmd = ["pytest", "--release", "test_release.py", f"--output-dir={release_output_dir}"]
27-
cmd = base_cmd + pytest_args + ["--population", "USA"]
25+
base_cmd = ["pytest", "--release", "test_release.py", "--check-max-tb=1000", f"--output-dir={release_output_dir}"]
26+
cmd = base_cmd + pytest_args
2827

2928
# log using job id
3029
job_id = request.node.callspec.id

tests/utilities.py

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import numpy as np
99
import numpy.typing as npt
1010
import pandas as pd
11+
from pytest_check import check
1112
from vivarium_testing_utils import FuzzyChecker
1213

1314
from pseudopeople.configuration import Keys, get_configuration
@@ -34,7 +35,8 @@ def run_column_noising_tests(
3435

3536
# Check that originally missing data remained missing
3637
originally_missing_idx = check_original.index[check_original[col.name].isna()]
37-
assert check_noised.loc[originally_missing_idx, col.name].isna().all()
38+
with check:
39+
assert check_noised.loc[originally_missing_idx, col.name].isna().all()
3840

3941
# Check for noising where applicable
4042
to_compare_idx = shared_idx.difference(originally_missing_idx)
@@ -43,7 +45,8 @@ def run_column_noising_tests(
4345
check_original.loc[to_compare_idx, col.name].values
4446
!= check_noised.loc[to_compare_idx, col.name].values
4547
)
46-
assert different_check.any()
48+
with check:
49+
assert different_check.any()
4750

4851
noise_level = different_check.sum()
4952

@@ -64,7 +67,8 @@ def run_column_noising_tests(
6467
== check_noised.loc[to_compare_idx, col.name].values
6568
)
6669

67-
assert same_check.all()
70+
with check:
71+
assert same_check.all()
6872

6973

7074
def run_omit_row_or_do_not_respond_tests(
@@ -87,15 +91,20 @@ def run_omit_row_or_do_not_respond_tests(
8791
]:
8892
# Census and household surveys have do_not_respond and omit_row.
8993
# For all other datasets they are mutually exclusive
90-
assert len(noise_types) == 2
94+
with check:
95+
assert len(noise_types) == 2
9196
else:
92-
assert len(noise_types) < 2
97+
with check:
98+
assert len(noise_types) < 2
9399
if not noise_types: # Check that there are no missing indexes
94-
assert noised_data.index.symmetric_difference(original_data.index).empty
100+
with check:
101+
assert noised_data.index.symmetric_difference(original_data.index).empty
95102
else: # Check that there are some omissions
96103
# TODO: assert levels are as expected
97-
assert noised_data.index.difference(original_data.index).empty
98-
assert not original_data.index.difference(noised_data.index).empty
104+
with check:
105+
assert noised_data.index.difference(original_data.index).empty
106+
with check:
107+
assert not original_data.index.difference(noised_data.index).empty
99108

100109

101110
def validate_column_noise_level(
@@ -158,7 +167,8 @@ def validate_column_noise_level(
158167
[1 - p for p in token_probability]
159168
)
160169
else:
161-
assert isinstance(tokens_per_string, pd.Series)
170+
with check:
171+
assert isinstance(tokens_per_string, pd.Series)
162172
avg_probability_any_token_noised = (
163173
1 - (1 - token_probability) ** tokens_per_string
164174
).mean()

0 commit comments

Comments (0)