Diff summary: 2 files changed, +4 −3 lines.
Files: noise-levels module, tests/integration/release test file.

@@ -31,7 +31,7 @@ def _get_census_omission_noise_levels(
3131 .astype (str )
3232 .map (data_values .DO_NOT_RESPOND_ADDITIVE_PROBABILITY_BY_RACE )
3333 )
34- ages = pd .Series (np .arange (population ["age" ].max () + 1 ))
34+ ages = pd .Series (np .arange (population ["age" ].astype ( int ). max () + 1 ))
3535 for sex in ["Female" , "Male" ]:
3636 effect_by_age_bin = data_values .DO_NOT_RESPOND_ADDITIVE_PROBABILITY_BY_SEX_AGE [sex ]
3737 # NOTE: calling pd.cut on a large array with an IntervalIndex is slow,
@@ -44,7 +44,7 @@ def _get_census_omission_noise_levels(
4444 )
4545 sex_mask = population ["sex" ] == sex
4646 probabilities [sex_mask ] += (
47- population [sex_mask ]["age" ].map (effect_by_age ).astype (float )
47+ population [sex_mask ]["age" ].astype ( int ). map (effect_by_age ).astype (float )
4848 )
4949 probabilities [probabilities < 0.0 ] = 0.0
5050 probabilities [probabilities > 1.0 ] = 1.0
@@ -131,7 +131,8 @@ def test_do_not_respond(
131131 name = "test_do_not_respond" ,
132132 observed_numerator = len (original_data ) - len (noised_data ),
133133 observed_denominator = len (original_data ),
134- target_proportion = expected_noise ,
134+ # 3% uncertainty on either side
135+ target_proportion = (expected_noise * .97 , expected_noise * 1.03 ),
135136 name_additional = f"noised_data" ,
136137 )
137138 assert set (noised_data .columns ) == set (original_data .columns )
You can’t perform that action at this time.
0 commit comments