Skip to content

Commit 2765efe

Browse files
committed
🧪 Add 100% test coverage for the helper module
1 parent e489053 commit 2765efe

File tree

3 files changed

+253
-6
lines changed

3 files changed

+253
-6
lines changed

.coveragerc

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,6 @@ _site-packages-to-src-mapping =
1212
*\Lib\site-packages
1313

1414
[report]
15-
# `fail_under` is set here temporarily until it can be dropped:
16-
fail_under = 28
1715
skip_covered = true
1816
skip_empty = true
1917
show_missing = true

.pre-commit-config.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,7 @@ repos:
128128
name: MyPy, for Python 3.13
129129
additional_dependencies:
130130
- lxml # dep of `--txt-report`, `--cobertura-xml-report` & `--html-report`
131+
- pytest # tests are also type-checked
131132
args:
132133
- --python-version=3.13
133134
- --any-exprs-report=.tox/.tmp/.test-results/mypy--py-3.13
@@ -143,6 +144,7 @@ repos:
143144
name: MyPy, for Python 3.11
144145
additional_dependencies:
145146
- lxml # dep of `--txt-report`, `--cobertura-xml-report` & `--html-report`
147+
- pytest # tests are also type-checked
146148
args:
147149
- --python-version=3.11
148150
- --any-exprs-report=.tox/.tmp/.test-results/mypy--py-3.11
@@ -158,6 +160,7 @@ repos:
158160
name: MyPy, for Python 3.9
159161
additional_dependencies:
160162
- lxml # dep of `--txt-report`, `--cobertura-xml-report` & `--html-report`
163+
- pytest # tests are also type-checked
161164
args:
162165
- --python-version=3.9
163166
- --any-exprs-report=.tox/.tmp/.test-results/mypy--py-3.9

tests/smoke_test.py

Lines changed: 250 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,254 @@
11
"""Initial test infra smoke tests."""
22

3-
import normalize_needed_jobs_status # pth via editable install in tox
3+
import json
4+
import pathlib
5+
import sys
46

7+
import pytest
58

6-
def test_smoke() -> None:
7-
"""Check that the imported module is truthy."""
8-
assert normalize_needed_jobs_status
9+
from normalize_needed_jobs_status import main as _invoke_helper_cli
10+
11+
12+
@pytest.mark.parametrize(
13+
(
14+
'allowed_failures',
15+
'allowed_skips',
16+
'jobs',
17+
'expected_return_code',
18+
'expected_outputs',
19+
'expected_summary_entries',
20+
),
21+
(
22+
pytest.param(
23+
json.dumps(['failing-job', 'skipped-job']),
24+
'',
25+
json.dumps(
26+
{
27+
'failing-job': {
28+
'result': 'failure',
29+
'outputs': {},
30+
},
31+
'succeeding-job': {
32+
'result': 'success',
33+
'outputs': {},
34+
},
35+
'skipped-job': {
36+
'result': 'skipped',
37+
'outputs': {},
38+
},
39+
},
40+
),
41+
0,
42+
{'failure=false', 'result=success', 'success=true'},
43+
{
44+
'All of the required dependency jobs succeeded',
45+
'Some of the allowed to fail jobs did not succeed',
46+
'Some of the allowed to be skipped jobs did not succeed',
47+
'failing-job → ❌ failure [allowed to fail]',
48+
'succeeding-job → ✓ success [required to succeed]',
49+
'skipped-job → ⬜ skipped [allowed to fail]',
50+
},
51+
id='success-despite-failure-and-skip',
52+
),
53+
pytest.param(
54+
'check-links-markdown, nightly',
55+
json.dumps([]),
56+
json.dumps(
57+
{
58+
'build-web': {
59+
'result': 'success',
60+
'outputs': {},
61+
},
62+
'check-links-book': {
63+
'result': 'success',
64+
'outputs': {},
65+
},
66+
'check-links-markdown': {
67+
'result': 'failure',
68+
'outputs': {},
69+
},
70+
'lint-megalinter': {
71+
'result': 'success',
72+
'outputs': {},
73+
},
74+
'nightly': {
75+
'result': 'failure',
76+
'outputs': {},
77+
},
78+
'publish-web': {
79+
'result': 'skipped',
80+
'outputs': {},
81+
},
82+
'test-dotnet': {
83+
'result': 'success',
84+
'outputs': {},
85+
},
86+
'test-elixir': {
87+
'result': 'success',
88+
'outputs': {},
89+
},
90+
'test-java': {
91+
'result': 'success',
92+
'outputs': {},
93+
},
94+
'test-js': {
95+
'result': 'success',
96+
'outputs': {},
97+
},
98+
'test-lib': {
99+
'result': 'success',
100+
'outputs': {},
101+
},
102+
'test-php': {
103+
'result': 'success',
104+
'outputs': {},
105+
},
106+
'test-python': {
107+
'result': 'success',
108+
'outputs': {},
109+
},
110+
'test-rust': {
111+
'result': 'success',
112+
'outputs': {},
113+
},
114+
'test-rust-main': {
115+
'result': 'success',
116+
'outputs': {},
117+
},
118+
},
119+
),
120+
1,
121+
{'failure=true', 'result=failure', 'success=false'},
122+
{
123+
'Some of the required to succeed jobs failed',
124+
'Some of the allowed to fail jobs did not succeed',
125+
'Some of the allowed to be skipped jobs did not succeed',
126+
'build-web → ✓ success [required to succeed]',
127+
'check-links-book → ✓ success [required to succeed]',
128+
'check-links-markdown → ❌ failure [allowed to fail]',
129+
'lint-megalinter → ✓ success [required to succeed]',
130+
'nightly → ❌ failure [allowed to fail]',
131+
'publish-web → ⬜ skipped [required to succeed]',
132+
'test-dotnet → ✓ success [required to succeed]',
133+
'test-elixir → ✓ success [required to succeed]',
134+
'test-java → ✓ success [required to succeed]',
135+
'test-js → ✓ success [required to succeed]',
136+
'test-lib → ✓ success [required to succeed]',
137+
'test-php → ✓ success [required to succeed]',
138+
'test-python → ✓ success [required to succeed]',
139+
'test-rust → ✓ success [required to succeed]',
140+
'test-rust-main → ✓ success [required to succeed]',
141+
},
142+
id='failure-due-to-skip',
143+
),
144+
pytest.param(
145+
'succeeding-job',
146+
'succeeding-job',
147+
json.dumps(
148+
{
149+
'succeeding-job': {
150+
'result': 'success',
151+
'outputs': {},
152+
},
153+
},
154+
),
155+
0,
156+
{'failure=false', 'result=success', 'success=true'},
157+
{
158+
'All of the required dependency jobs succeeded',
159+
'All of the allowed to fail dependency jobs succeeded',
160+
'All of the allowed to be skipped dependency jobs succeeded',
161+
'succeeding-job → ✓ success [allowed to fail]',
162+
},
163+
id='success-of-all-allowed-to-skip-or-fail',
164+
),
165+
pytest.param(
166+
'failing-job',
167+
'failing-job',
168+
json.dumps(
169+
{
170+
'failing-job': {
171+
'result': 'failure',
172+
'outputs': {},
173+
},
174+
},
175+
),
176+
1,
177+
{'failure=false', 'result=success', 'success=true'},
178+
{
179+
'All of the required dependency jobs succeeded',
180+
'Some of the allowed to fail jobs did not succeed',
181+
'Some of the allowed to be skipped jobs did not succeed',
182+
'failing-job → ❌ failure [allowed to fail]',
183+
},
184+
id='success-of-some-allowed-to-skip-or-fail',
185+
marks=pytest.mark.xfail(reason='This is a bug to fix'),
186+
),
187+
pytest.param(
188+
'',
189+
'',
190+
json.dumps(
191+
{
192+
'succeeding-job': {
193+
'result': 'success',
194+
'outputs': {},
195+
},
196+
},
197+
),
198+
0,
199+
{'failure=false', 'result=success', 'success=true'},
200+
{
201+
'All of the required dependency jobs succeeded',
202+
'succeeding-job → ✓ success [required to succeed]',
203+
},
204+
id='everything-required',
205+
),
206+
pytest.param(
207+
'',
208+
'',
209+
'{}',
210+
1,
211+
set(),
212+
{'Invalid input jobs matrix'},
213+
id='failure-due-to-empty-jobs',
214+
),
215+
),
216+
)
217+
def test_smoke(
218+
allowed_failures: str,
219+
allowed_skips: str,
220+
jobs: str,
221+
expected_return_code: int,
222+
expected_outputs: set[str],
223+
expected_summary_entries: set[str],
224+
monkeypatch: pytest.MonkeyPatch,
225+
tmp_path: pathlib.Path,
226+
) -> None:
227+
"""Validate all known scenarios."""
228+
gh_step_summary_path = tmp_path / 'gh_step_summary'
229+
gh_output_path = tmp_path / 'gh_output'
230+
231+
monkeypatch.setenv('GITHUB_STEP_SUMMARY', str(gh_step_summary_path))
232+
monkeypatch.setenv('GITHUB_OUTPUT', str(gh_output_path))
233+
234+
helper_return_code = _invoke_helper_cli(
235+
[
236+
sys.executable,
237+
allowed_failures,
238+
allowed_skips,
239+
jobs,
240+
],
241+
)
242+
assert helper_return_code == expected_return_code
243+
244+
gh_step_summary_txt = gh_step_summary_path.read_text(encoding='utf-8')
245+
246+
assert all(
247+
line in gh_step_summary_txt for line in expected_summary_entries
248+
)
249+
250+
if not expected_outputs:
251+
return
252+
253+
gh_output_txt = gh_output_path.read_text(encoding='utf-8')
254+
assert all(line in gh_output_txt for line in expected_outputs)

0 commit comments

Comments (0)