@@ -33,6 +33,7 @@
     TestResult,
     TestSubsystem,
     TestTimeoutField,
+    _format_test_rerun_command,
     _format_test_summary,
     build_runtime_package_dependencies,
     run_tests,
@@ -52,6 +53,7 @@
     EMPTY_DIGEST,
     EMPTY_FILE_DIGEST,
     Digest,
+    FileDigest,
     MergeDigests,
     Snapshot,
     Workspace,
@@ -111,6 +113,31 @@ def make_process_result_metadata(
     )


+def make_test_result(
+    addresses: Iterable[Address],
+    exit_code: None | int,
+    stdout_bytes: bytes = b"",
+    stdout_digest: FileDigest = EMPTY_FILE_DIGEST,
+    stderr_bytes: bytes = b"",
+    stderr_digest: FileDigest = EMPTY_FILE_DIGEST,
+    coverage_data: CoverageData | None = None,
+    output_setting: ShowOutput = ShowOutput.NONE,
+    result_metadata: None | ProcessResultMetadata = None,
+) -> TestResult:
+    """Create a TestResult with default values for most fields."""
+    return TestResult(
+        addresses=tuple(addresses),
+        exit_code=exit_code,
+        stdout_bytes=stdout_bytes,
+        stdout_digest=stdout_digest,
+        stderr_bytes=stderr_bytes,
+        stderr_digest=stderr_digest,
+        coverage_data=coverage_data,
+        output_setting=output_setting,
+        result_metadata=result_metadata,
+    )
+
+
 class MockMultipleSourcesField(MultipleSourcesField):
     pass

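This new helper is the core of the refactor: callers now pass only the fields they care about instead of spelling out all nine `TestResult` fields every time. A minimal usage sketch (the address and the `pants.engine.addresses` import path are illustrative assumptions, not taken from the diff):

```python
from pants.engine.addresses import Address

# Only addresses and exit_code are required; stdout/stderr bytes and digests,
# coverage data, output setting, and result metadata all take defaults.
result = make_test_result([Address("", target_name="demo")], exit_code=0)
assert result.exit_code == 0
assert result.stdout_bytes == b""
assert result.result_metadata is None
```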
@@ -178,13 +205,9 @@ def skipped(_: Iterable[Address]) -> bool:
     @classmethod
     def test_result(cls, field_sets: Iterable[MockTestFieldSet]) -> TestResult:
         addresses = [field_set.address for field_set in field_sets]
-        return TestResult(
+        return make_test_result(
+            addresses,
             exit_code=cls.exit_code(addresses),
-            stdout_bytes=b"",
-            stdout_digest=EMPTY_FILE_DIGEST,
-            stderr_bytes=b"",
-            stderr_digest=EMPTY_FILE_DIGEST,
-            addresses=tuple(addresses),
             coverage_data=MockCoverageData(addresses),
             output_setting=ShowOutput.ALL,
             result_metadata=None if cls.skipped(addresses) else make_process_result_metadata("ran"),
@@ -247,6 +270,7 @@ def run_test_rule(
     report_dir: str = TestSubsystem.default_report_path,
     output: ShowOutput = ShowOutput.ALL,
     valid_targets: bool = True,
+    show_rerun_command: bool = False,
     run_id: RunId = RunId(999),
 ) -> tuple[int, str]:
     test_subsystem = create_goal_subsystem(
@@ -261,6 +285,7 @@ def run_test_rule(
         extra_env_vars=[],
         shard="",
         batch_size=1,
+        show_rerun_command=show_rerun_command,
     )
     debug_adapter_subsystem = create_subsystem(
         DebugAdapterSubsystem,
@@ -408,7 +433,39 @@ def test_skipped_target_noops(rule_runner: PythonRuleRunner) -> None:
     assert stderr.strip() == ""


-def test_summary(rule_runner: PythonRuleRunner) -> None:
+@pytest.mark.parametrize(
+    ("show_rerun_command", "expected_stderr"),
+    [
+        (
+            False,
+            # the summary is for humans, so we test it literally, to make sure the formatting is good
+            dedent(
+                """\
+
+                ✓ //:good succeeded in 1.00s (memoized).
+                ✕ //:bad failed in 1.00s (memoized).
+                """
+            ),
+        ),
+        (
+            True,
+            dedent(
+                """\
+
+                ✓ //:good succeeded in 1.00s (memoized).
+                ✕ //:bad failed in 1.00s (memoized).
+
+                To rerun the failing tests, use:
+
+                    pants test //:bad
+                """
+            ),
+        ),
+    ],
+)
+def test_summary(
+    rule_runner: PythonRuleRunner, show_rerun_command: bool, expected_stderr: str
+) -> None:
     good_address = Address("", target_name="good")
     bad_address = Address("", target_name="bad")
     skipped_address = Address("", target_name="skipped")
@@ -417,15 +474,10 @@ def test_summary(rule_runner: PythonRuleRunner) -> None:
         rule_runner,
         request_type=ConditionallySucceedsRequest,
         targets=[make_target(good_address), make_target(bad_address), make_target(skipped_address)],
+        show_rerun_command=show_rerun_command,
     )
     assert exit_code == ConditionallySucceedsRequest.exit_code((bad_address,))
-    assert stderr == dedent(
-        """\
-
-        ✓ //:good succeeded in 1.00s (memoized).
-        ✕ //:bad failed in 1.00s (memoized).
-        """
-    )
+    assert stderr == expected_stderr


 def _assert_test_summary(
@@ -436,15 +488,11 @@ def _assert_test_summary(
     result_metadata: ProcessResultMetadata | None,
 ) -> None:
     assert expected == _format_test_summary(
-        TestResult(
+        make_test_result(
+            [Address(spec_path="", target_name="dummy_address")],
             exit_code=exit_code,
-            stdout_bytes=b"",
-            stderr_bytes=b"",
-            stdout_digest=EMPTY_FILE_DIGEST,
-            stderr_digest=EMPTY_FILE_DIGEST,
-            addresses=(Address(spec_path="", target_name="dummy_address"),),
-            output_setting=ShowOutput.FAILED,
             result_metadata=result_metadata,
+            output_setting=ShowOutput.FAILED,
         ),
         RunId(run_id),
         Console(use_colors=False),
@@ -493,6 +541,64 @@ def test_format_summary_memoized_remote(rule_runner: PythonRuleRunner) -> None:
     )


+@pytest.mark.parametrize(
+    ("results", "expected"),
+    [
+        pytest.param([], None, id="no_results"),
+        pytest.param(
+            [make_test_result([Address("", target_name="t1")], exit_code=0)], None, id="one_success"
+        ),
+        pytest.param(
+            [make_test_result([Address("", target_name="t2")], exit_code=None)],
+            None,
+            id="one_no_run",
+        ),
+        pytest.param(
+            [make_test_result([Address("", target_name="t3")], exit_code=1)],
+            "To rerun the failing tests, use:\n\n    pants test //:t3",
+            id="one_failure",
+        ),
+        pytest.param(
+            [
+                make_test_result([Address("", target_name="t1")], exit_code=0),
+                make_test_result([Address("", target_name="t2")], exit_code=None),
+                make_test_result([Address("", target_name="t3")], exit_code=1),
+            ],
+            "To rerun the failing tests, use:\n\n    pants test //:t3",
+            id="one_of_each",
+        ),
+        pytest.param(
+            [
+                make_test_result([Address("path/to", target_name="t1")], exit_code=1),
+                make_test_result([Address("another/path", target_name="t2")], exit_code=2),
+                make_test_result([Address("", target_name="t3")], exit_code=3),
+            ],
+            "To rerun the failing tests, use:\n\n    pants test //:t3 another/path:t2 path/to:t1",
+            id="multiple_failures",
+        ),
+        pytest.param(
+            [
+                make_test_result(
+                    [
+                        Address(
+                            "path with spaces",
+                            target_name="$*",
+                            parameters=dict(key="value"),
+                            generated_name="gn",
+                        )
+                    ],
+                    exit_code=1,
+                )
+            ],
+            "To rerun the failing tests, use:\n\n    pants test 'path with spaces:$*#gn@key=value'",
+            id="special_characters_require_quoting",
+        ),
+    ],
+)
+def test_format_rerun_command(results: list[TestResult], expected: None | str) -> None:
+    assert expected == _format_test_rerun_command(results)
+
+
 def test_debug_target(rule_runner: PythonRuleRunner) -> None:
     exit_code, _ = run_test_rule(
         rule_runner,
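These cases fully pin down the contract of `_format_test_rerun_command`: return `None` unless something actually failed (an `exit_code` of `None` means the test never ran), sort the failing address specs, and shell-quote any spec that needs it. The implementation itself is not part of this diff; the following is only a sketch reconstructed from the cases above, with `pants` hardcoded where the real code would presumably use `bin_name()`:

```python
import shlex
from collections.abc import Iterable


# TestResult is imported at the top of this test file; this body is a guess
# consistent with the parametrized cases, not the actual implementation.
def _format_test_rerun_command(results: Iterable[TestResult]) -> None | str:
    # exit_code None means "did not run" (the one_no_run case) and 0 means
    # success; only everything else counts as a failure.
    failed_specs = sorted(
        address.spec
        for result in results
        if result.exit_code not in (None, 0)
        for address in result.addresses
    )
    if not failed_specs:
        return None
    # shlex.quote covers the special_characters_require_quoting case,
    # yielding e.g. 'path with spaces:$*#gn@key=value'.
    quoted = " ".join(shlex.quote(spec) for spec in failed_specs)
    return f"To rerun the failing tests, use:\n\n    pants test {quoted}"
```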
@@ -597,14 +703,12 @@ def assert_streaming_output(
    expected_message: str,
    result_metadata: ProcessResultMetadata = make_process_result_metadata("dummy"),
 ) -> None:
-    result = TestResult(
+    result = make_test_result(
+        addresses=(Address("demo_test"),),
         exit_code=exit_code,
         stdout_bytes=stdout.encode(),
-        stdout_digest=EMPTY_FILE_DIGEST,
         stderr_bytes=stderr.encode(),
-        stderr_digest=EMPTY_FILE_DIGEST,
         output_setting=output_setting,
-        addresses=(Address("demo_test"),),
         result_metadata=result_metadata,
     )
     assert result.level() == expected_level
@@ -720,14 +824,11 @@ def assert_timeout_calculated(


 def test_non_utf8_output() -> None:
-    test_result = TestResult(
+    test_result = make_test_result(
+        [],
         exit_code=1,  # "test error" so stdout/stderr are output in message
         stdout_bytes=b"\x80\xBF",  # invalid UTF-8 as required by the test
-        stdout_digest=EMPTY_FILE_DIGEST,  # incorrect but we do not check in this test
         stderr_bytes=b"\x80\xBF",  # invalid UTF-8 as required by the test
-        stderr_digest=EMPTY_FILE_DIGEST,  # incorrect but we do not check in this test
-        addresses=(),
         output_setting=ShowOutput.ALL,
-        result_metadata=None,
     )
     assert test_result.message() == "failed (exit code 1).\n��\n��\n\n"
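This last hunk keeps exercising `TestResult.message()` on invalid UTF-8: the `\ufffd` replacement characters in the expected message show the bytes are decoded leniently (presumably `errors="replace"` or equivalent) rather than raising. The underlying Python behavior, for reference:

```python
# b"\x80" and b"\xBF" are bare continuation bytes, invalid as UTF-8 starts;
# lenient decoding maps each to U+FFFD (�) instead of raising UnicodeDecodeError.
assert b"\x80\xBF".decode("utf-8", errors="replace") == "\ufffd\ufffd"
```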