Skip to content

Commit 7c47af1

Browse files
committed
Fix tests
1 parent 0e3dbc1 commit 7c47af1

File tree

4 files changed, +29 −7 lines changed

Diff for: pyperf/__main__.py

+4-1
Original file line numberDiff line numberDiff line change
@@ -491,10 +491,13 @@ def display_benchmarks(args, show_metadata=False, hist=False, stats=False,
491491
empty_line(output)
492492
output.extend(lines)
493493

494+
contains_warning = False
494495
for line in output:
496+
if line.startswith("WARNING:"):
497+
contains_warning = True
495498
print(line)
496499

497-
if not output and only_checks:
500+
if not contains_warning and only_checks:
498501
if len(data) == 1:
499502
print("The benchmark seems to be stable")
500503
else:

Diff for: pyperf/_bench.py

+5-1
Original file line numberDiff line numberDiff line change
@@ -437,7 +437,11 @@ def required_nsamples(self):
437437
# Get the means of the values per run
438438
values = []
439439
for run in self._runs:
440-
values.append(statistics.mean(run.values))
440+
if len(run.values):
441+
values.append(statistics.mean(run.values))
442+
443+
if len(values) < 2:
444+
return None
441445

442446
total = math.fsum(values)
443447
mean = total / len(values)

Diff for: pyperf/_cli.py

+8-2
Original file line numberDiff line numberDiff line change
@@ -425,7 +425,10 @@ def format_checks(bench, lines=None):
425425
% (bench.format_value(stdev), percent, bench.format_value(mean)))
426426
else:
427427
# display a warning if the number of samples isn't enough to get a stable result
428-
if required_nsamples > len(bench._runs):
428+
if (
429+
required_nsamples is not None and
430+
required_nsamples > len(bench._runs)
431+
):
429432
warn("Not enough samples to get a stable result (95% certainly of less than 1% variation)")
430433

431434
# Minimum and maximum, detect obvious outliers
@@ -463,7 +466,10 @@ def format_checks(bench, lines=None):
463466
lines.append("Use pyperf stats, pyperf dump and pyperf hist to analyze results.")
464467
lines.append("Use --quiet option to hide these warnings.")
465468

466-
if required_nsamples < len(bench._runs) * 0.75:
469+
if (
470+
required_nsamples is not None and
471+
required_nsamples < len(bench._runs) * 0.75
472+
):
467473
lines.append("Benchmark was run more times than necessary to get a stable result.")
468474
lines.append(
469475
"Consider passing processes=%d to the Runner constructor to save time." %

Diff for: pyperf/tests/test_perf_cli.py

+12-3
Original file line numberDiff line numberDiff line change
@@ -478,11 +478,16 @@ def test_hist(self):
478478
22.8 ms: 3 ##############
479479
22.9 ms: 4 ###################
480480
22.9 ms: 4 ###################
481+
Benchmark was run more times than necessary to get a stable result.
482+
Consider passing processes=7 to the Runner constructor to save time.
481483
""")
482484
self.check_command(expected, 'hist', TELCO, env=env)
483485

484486
def test_show(self):
485487
expected = ("""
488+
Benchmark was run more times than necessary to get a stable result.
489+
Consider passing processes=7 to the Runner constructor to save time.
490+
486491
Mean +- std dev: 22.5 ms +- 0.2 ms
487492
""")
488493
self.check_command(expected, 'show', TELCO)
@@ -518,6 +523,8 @@ def test_stats(self):
518523
100th percentile: 22.9 ms (+2% of the mean) -- maximum
519524
520525
Number of outlier (out of 22.0 ms..23.0 ms): 0
526+
Benchmark was run more times than necessary to get a stable result.
527+
Consider passing processes=7 to the Runner constructor to save time.
521528
""")
522529
self.check_command(expected, 'stats', TELCO)
523530

@@ -628,8 +635,10 @@ def test_slowest(self):
628635

629636
def test_check_stable(self):
630637
stdout = self.run_command('check', TELCO)
631-
self.assertEqual(stdout.rstrip(),
632-
'The benchmark seems to be stable')
638+
self.assertTrue(
639+
'The benchmark seems to be stable' in
640+
stdout.rstrip()
641+
)
633642

634643
def test_command(self):
635644
command = [sys.executable, '-c', 'pass']
@@ -689,7 +698,7 @@ def _check_track_memory(self, track_option):
689698
'[1,2]*1000',
690699
'-o', tmp_name)
691700
bench = pyperf.Benchmark.load(tmp_name)
692-
701+
693702
self._check_track_memory_bench(bench, loops=5)
694703

695704
def test_track_memory(self):

0 commit comments

Comments (0)