
Commit 711b01f

fix: make --dry-run useful (#256)
* chore: remove include_curr_dir option

  The `include_curr_dir` option is only used by strict_mode in the Python standard runner, and few people will ever use strict mode. Realistically, no one will ever set `include_curr_dir` to False, because that increases the chance that ATS won't work (and it already fails in enough situations without adding one more). I'm keeping `strict_mode`, but making `include_curr_dir` always True. The default was already True; now there's no choice.

* fix: make --dry-run useful

  This makes --dry-run in label-analysis actually do something useful: it prints the tests you need to run, _with_ the tool options needed for ATS to run correctly. In the case of pytest, this means it prints `--cov-context=test` (we need this to get labels) followed by the list of labels to run, separated by spaces.

  I couldn't find a way to safely export an ENV variable, so I'm just printing to stdout. The small problem is that other output gets mixed in with it, so I prefix the line with "ATS_TESTS_TO_RUN=" to make grep-ing easier. Just for good measure, the list is written to a file as well.

  So in theory, if you get ATS_TESTS_TO_RUN from your CI, you can run `pytest <YOUR NORMAL OPTIONS> <ATS_TESTS_TO_RUN>` and that should run all the ATS tests.

  closes codecov/engineering-team#470

* feat: only print dry-run to file if path specified

  Introducing `--dry-run-output-path` as the filepath to print the dry-run output into. It only works if `--dry-run` is also specified. Also updating the help text for `--dry-run`.
1 parent 9e064fd commit 711b01f
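
A minimal sketch of the consumption pattern described in the commit message, assuming the label-analysis stdout has been captured to a file in CI (the `run_ats_tests` helper and the log file name are hypothetical, not part of this commit):

import subprocess

def run_ats_tests(captured_output, normal_options):
    """Find the ATS_TESTS_TO_RUN line in label-analysis output and run pytest."""
    for line in captured_output.splitlines():
        if line.startswith("ATS_TESTS_TO_RUN="):
            # Drop the prefix and the surrounding quotes; entries are space-separated.
            value = line[len("ATS_TESTS_TO_RUN="):].strip().strip('"')
            return subprocess.call(["pytest", *normal_options, *value.split()])
    raise RuntimeError("no ATS_TESTS_TO_RUN line in label-analysis output")

# e.g. run_ats_tests(open("label_analysis.log").read(), ["-q"])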

File tree: 5 files changed, +149 -39 lines changed


codecov_cli/commands/labelanalysis.py (+50 -21)

@@ -1,7 +1,7 @@
-import json
 import logging
+import pathlib
 import time
-from typing import List
+from typing import List, Optional

 import click
 import requests
@@ -55,9 +55,16 @@
 @click.option(
     "--dry-run",
     "dry_run",
-    help="Userful during setup. This will run the label analysis, but will print the result to stdout and terminate instead of calling the runner.process_labelanalysis_result",
+    help='Print list of tests to run and options that need to be added to the test runner as a space-separated list to stdout. Format is ATS_TESTS_TO_RUN="<options> <test_1> <test_2> ... <test_n>"',
     is_flag=True,
 )
+@click.option(
+    "--dry-run-output-path",
+    "dry_run_output_path",
+    help="Prints the dry-run list into dry_run_output_path (in addition to stdout)",
+    type=pathlib.Path,
+    default=None,
+)
 @click.pass_context
 def label_analysis(
     ctx: click.Context,
@@ -67,6 +74,7 @@ def label_analysis(
     runner_name: str,
     max_wait_time: str,
     dry_run: bool,
+    dry_run_output_path: Optional[pathlib.Path],
 ):
     enterprise_url = ctx.obj.get("enterprise_url")
     logger.debug(
@@ -137,7 +145,12 @@ def label_analysis(
         # Retry it
         eid = _send_labelanalysis_request(payload, url, token_header)
         if eid is None:
-            _fallback_to_collected_labels(requested_labels, runner, dry_run=dry_run)
+            _fallback_to_collected_labels(
+                requested_labels,
+                runner,
+                dry_run=dry_run,
+                dry_run_output_path=dry_run_output_path,
+            )
             return

     has_result = False
@@ -157,23 +170,33 @@
             if not dry_run:
                 runner.process_labelanalysis_result(request_result)
             else:
-                _dry_run_output(LabelAnalysisRequestResult(request_result))
+                _dry_run_output(
+                    LabelAnalysisRequestResult(request_result),
+                    runner,
+                    dry_run_output_path,
+                )
             return
         if resp_json["state"] == "error":
             logger.error(
                 "Request had problems calculating",
                 extra=dict(extra_log_attributes=dict(resp_json=resp_json)),
             )
             _fallback_to_collected_labels(
-                collected_labels=requested_labels, runner=runner, dry_run=dry_run
+                collected_labels=requested_labels,
+                runner=runner,
+                dry_run=dry_run,
+                dry_run_output_path=dry_run_output_path,
             )
             return
         if max_wait_time and (time.monotonic() - start_wait) > max_wait_time:
             logger.error(
                 f"Exceeded max waiting time of {max_wait_time} seconds. Running all tests.",
             )
             _fallback_to_collected_labels(
-                collected_labels=requested_labels, runner=runner, dry_run=dry_run
+                collected_labels=requested_labels,
+                runner=runner,
+                dry_run=dry_run,
+                dry_run_output_path=dry_run_output_path,
            )
             return
         logger.info("Waiting more time for result...")
@@ -285,27 +308,31 @@ def _send_labelanalysis_request(payload, url, token_header):
     return eid


-def _dry_run_output(result: LabelAnalysisRequestResult):
-    logger.info(
-        "Not executing tests because '--dry-run' is on. List of labels selected for running below."
-    )
-    logger.info("")
-    logger.info("Label groups:")
-    logger.info(
-        "- absent_labels: Set of new labels found in HEAD that are not present in BASE"
+def _dry_run_output(
+    result: LabelAnalysisRequestResult,
+    runner: LabelAnalysisRunnerInterface,
+    dry_run_output_path: Optional[pathlib.Path],
+):
+    labels_to_run = list(
+        set(
+            result.absent_labels
+            + result.global_level_labels
+            + result.present_diff_labels
+        )
     )
-    logger.info("- present_diff_labels: Set of labels affected by the git diff")
-    logger.info("- global_level_labels: Set of labels that possibly touch global code")
-    logger.info("- present_report_labels: Set of labels previously uploaded")
-    logger.info("")
-    logger.info(json.dumps(result))
+    output = runner.dry_run_runner_options + sorted(labels_to_run)
+    if dry_run_output_path is not None:
+        with open(dry_run_output_path, "w") as fd:
+            fd.write(" ".join(output) + "\n")
+    click.echo(f"ATS_TESTS_TO_RUN=\"{' '.join(output)}\"")


 def _fallback_to_collected_labels(
     collected_labels: List[str],
     runner: LabelAnalysisRunnerInterface,
     *,
     dry_run: bool = False,
+    dry_run_output_path: Optional[pathlib.Path] = None,
 ) -> dict:
     logger.info("Trying to fallback on collected labels")
     if collected_labels:
@@ -321,6 +348,8 @@ def _fallback_to_collected_labels(
         if not dry_run:
             return runner.process_labelanalysis_result(fake_response)
         else:
-            return _dry_run_output(LabelAnalysisRequestResult(fake_response))
+            return _dry_run_output(
+                LabelAnalysisRequestResult(fake_response), runner, dry_run_output_path
+            )
     logger.error("Cannot fallback to collected labels because no labels were collected")
     raise click.ClickException("Failed to get list of labels to run")
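
To make the new `_dry_run_output` behaviour concrete, here is a standalone sketch of its dedup-and-sort logic, using the `--cov-context=test` option that the pytest runner contributes (see python_standard_runner.py below); the sample label names are invented:

result = {
    "absent_labels": ["test_new"],
    "present_diff_labels": ["test_changed", "test_new"],  # overlap is deduplicated
    "global_level_labels": ["test_global"],
}
runner_options = ["--cov-context=test"]  # PythonStandardRunner.dry_run_runner_options

labels_to_run = set(
    result["absent_labels"]
    + result["global_level_labels"]
    + result["present_diff_labels"]
)
output = runner_options + sorted(labels_to_run)
print(f'ATS_TESTS_TO_RUN="{" ".join(output)}"')
# -> ATS_TESTS_TO_RUN="--cov-context=test test_changed test_global test_new"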

codecov_cli/runners/python_standard_runner.py (+3)

@@ -97,6 +97,9 @@ def _execute_pytest_subprocess(


 class PythonStandardRunner(LabelAnalysisRunnerInterface):
+
+    dry_run_runner_options = ["--cov-context=test"]
+
     def __init__(self, config_params: Optional[dict] = None) -> None:
         super().__init__()
         if config_params is None:

codecov_cli/runners/types.py (+3 -2)

@@ -1,4 +1,4 @@
-from typing import List
+from typing import Dict, List


 # This is supposed to be a TypedDict,
@@ -23,7 +23,8 @@ def global_level_labels(self) -> List[str]:


 class LabelAnalysisRunnerInterface(object):
-    params = None
+    params: Dict = None
+    dry_run_runner_options: List[str] = []

     def collect_tests(self) -> List[str]:
         raise NotImplementedError()
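
The new `dry_run_runner_options` class attribute is the hook a runner uses to contribute tool-specific flags to the dry-run line. A minimal sketch of a custom runner built on this interface (the class name and flag are invented for illustration):

from typing import List

from codecov_cli.runners.types import LabelAnalysisRunnerInterface


class MyRunner(LabelAnalysisRunnerInterface):
    # Flags the dry-run line should carry so that labels are collected
    # correctly when the printed command is replayed (flag is hypothetical).
    dry_run_runner_options: List[str] = ["--my-label-flag"]

    def collect_tests(self) -> List[str]:
        return ["test_a", "test_b"]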

tests/commands/test_invoke_labelanalysis.py (+91 -16)

@@ -1,4 +1,5 @@
 import json
+from pathlib import Path

 import click
 import pytest
@@ -122,10 +123,13 @@ def test_labelanalysis_help(self, mocker, fake_ci_provider):
         "  --max-wait-time INTEGER     Max time (in seconds) to wait for the label",
         "                              analysis result before falling back to running",
         "                              all tests. Default is to wait forever.",
-        "  --dry-run                   Userful during setup. This will run the label",
-        "                              analysis, but will print the result to stdout",
-        "                              and terminate instead of calling the",
-        "                              runner.process_labelanalysis_result",
+        "  --dry-run                   Print list of tests to run and options that need",
+        "                              to be added to the test runner as a space-",
+        "                              separated list to stdout. Format is",
+        '                              ATS_TESTS_TO_RUN="<options> <test_1> <test_2>',
+        '                              ... <test_n>"',
+        "  --dry-run-output-path PATH  Prints the dry-run list into dry_run_output_path",
+        "                              (in addition to stdout)",
         "  -h, --help                  Show this message and exit.",
         "",
     ]
@@ -258,21 +262,90 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
                 json={"state": "finished", "result": label_analysis_result},
             )
             cli_runner = CliRunner()
-            result = cli_runner.invoke(
-                cli,
-                [
-                    "label-analysis",
-                    "--token=STATIC_TOKEN",
-                    f"--base-sha={FAKE_BASE_SHA}",
-                    "--dry-run",
+            with cli_runner.isolated_filesystem():
+                result = cli_runner.invoke(
+                    cli,
+                    [
+                        "label-analysis",
+                        "--token=STATIC_TOKEN",
+                        f"--base-sha={FAKE_BASE_SHA}",
+                        "--dry-run",
+                    ],
+                    obj={},
+                )
+            mock_get_runner.assert_called()
+            fake_runner.process_labelanalysis_result.assert_not_called()
+        print(result.output)
+        assert result.exit_code == 0
+        assert (
+            'ATS_TESTS_TO_RUN="--labels test_absent test_global test_in_diff'
+            in result.output
+        )
+
+    def test_invoke_label_analysis_dry_run_with_output_path(
+        self, get_labelanalysis_deps, mocker
+    ):
+        mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
+        fake_runner = get_labelanalysis_deps["fake_runner"]
+
+        label_analysis_result = {
+            "present_report_labels": ["test_present"],
+            "absent_labels": ["test_absent"],
+            "present_diff_labels": ["test_in_diff"],
+            "global_level_labels": ["test_global"],
+        }
+
+        with responses.RequestsMock() as rsps:
+            rsps.add(
+                responses.POST,
+                "https://api.codecov.io/labels/labels-analysis",
+                json={"external_id": "label-analysis-request-id"},
+                status=201,
+                match=[
+                    matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
                 ],
-                obj={},
             )
-            mock_get_runner.assert_called()
-            fake_runner.process_labelanalysis_result.assert_not_called()
+            rsps.add(
+                responses.PATCH,
+                "https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
+                json={"external_id": "label-analysis-request-id"},
+                status=201,
+                match=[
+                    matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
+                ],
+            )
+            rsps.add(
+                responses.GET,
+                "https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
+                json={"state": "finished", "result": label_analysis_result},
+            )
+            cli_runner = CliRunner()
+            with cli_runner.isolated_filesystem():
+                result = cli_runner.invoke(
+                    cli,
+                    [
+                        "label-analysis",
+                        "--token=STATIC_TOKEN",
+                        f"--base-sha={FAKE_BASE_SHA}",
+                        "--dry-run",
+                        "--dry-run-output-path=ats_output_path",
+                    ],
+                    obj={},
+                )
+                labels_file = Path("ats_output_path")
+                assert labels_file.exists() and labels_file.is_file()
+                with open(labels_file, "r") as fd:
+                    assert fd.readlines() == [
+                        "--labels test_absent test_global test_in_diff\n"
+                    ]
+            mock_get_runner.assert_called()
+            fake_runner.process_labelanalysis_result.assert_not_called()
         print(result.output)
         assert result.exit_code == 0
-        assert json.dumps(label_analysis_result) in result.output
+        assert (
+            'ATS_TESTS_TO_RUN="--labels test_absent test_global test_in_diff'
+            in result.output
+        )

     def test_fallback_to_collected_labels(self, mocker):
         mock_runner = mocker.MagicMock()
@@ -367,7 +440,9 @@ def test_fallback_dry_run(self, get_labelanalysis_deps, mocker, use_verbose_option):
                 "absent_labels": collected_labels,
                 "present_diff_labels": [],
                 "global_level_labels": [],
-            }
+            },
+            fake_runner,
+            None,
         )
         assert result.exit_code == 0

tests/factory.py (+2)

@@ -75,6 +75,8 @@ def get_fallback_value(self, fallback_field: FallbackFieldEnum) -> Optional[str]:


 class FakeRunner(LabelAnalysisRunnerInterface):
+    dry_run_runner_options = ["--labels"]
+
     def __init__(self, collect_tests_response: List[str]) -> None:
         super().__init__()
         self.collect_tests_response = collect_tests_response
