
Commit 5b0a8f3

Merge pull request #786 from qwhelan/timeout_bisection
Allow asv find to perform a bisection even if benchmark times out
2 parents: fe0d639 + c88fe54

7 files changed: +89 -31 lines changed

CHANGES.rst

Lines changed: 3 additions & 0 deletions
@@ -13,6 +13,9 @@ API Changes
 
 Bug Fixes
 ^^^^^^^^^
+- When an ``asv find`` step fails due to timeout, assume runtime equal to
+  timeout to allow bisection to proceed (#768)
+
 
 Other Changes and Additions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
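The changelog entry is terse, so a minimal sketch of the idea may help: when a timing benchmark is killed by its timeout, asv now scores that commit as if it took exactly the timeout, which gives the bisection a comparable number instead of a missing one. The toy bisection below only illustrates that principle; the helper names and the simplified algorithm are stand-ins, not asv's actual code.

    # Toy model: runtimes per commit, oldest to newest; None means "timed out".
    TIMEOUT = 1.0

    def effective_runtime(measured):
        # The fix in this commit, in miniature: a timed-out run counts as
        # having taken exactly the timeout.
        return TIMEOUT if measured is None else measured

    def bisect_regression(runtimes):
        # Find the first commit whose runtime crosses the midpoint between
        # the oldest and newest commit (a simplified stand-in for asv find).
        lo, hi = 0, len(runtimes) - 1
        threshold = (effective_runtime(runtimes[lo]) +
                     effective_runtime(runtimes[hi])) / 2
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            if effective_runtime(runtimes[mid]) >= threshold:
                hi = mid   # regression is at or before mid
            else:
                lo = mid   # regression is after mid
        return hi

    print(bisect_regression([0.01, 0.02, None, None]))  # -> 2

Without the substitution, the None results would leave the comparison undefined and the search would have to fall back to checking commits one by one.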

asv/commands/find.py

Lines changed: 10 additions & 0 deletions
@@ -111,6 +111,7 @@ def run(cls, conf, range_spec, bench, invert=False, show_stderr=False, parallel=
             return 1
 
         benchmark_name, = benchmarks.keys()
+        benchmark_type = benchmarks[benchmark_name]["type"]
 
         steps = int(math.log(len(commit_hashes)) / math.log(2))
 
@@ -144,6 +145,15 @@ def do_benchmark(i):
 
             results[i] = result
 
+            # If we failed due to timeout in a timing benchmark, set
+            # runtime as the timeout to prevent falling back to linear
+            # search
+            errcode = res.errcode[benchmark_name]
+            if errcode == util.TIMEOUT_RETCODE and benchmark_type == "time":
+                timeout_limit = benchmarks[benchmark_name]['timeout']
+                results[i] = [r if r is not None else timeout_limit
+                              for r in results[i]]
+
             return results[i]
 
         def non_null_results(*results):
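In isolation, the effect of the new block is simply to backfill the per-parameter result list of a timed-out timing benchmark. Below is a small self-contained sketch of that substitution, under the assumption that results[i] is a flat list of timings with None where the run was killed; fill_timed_out is a hypothetical helper and the TIMEOUT_RETCODE value here is a stand-in, not necessarily asv's actual constant.

    TIMEOUT_RETCODE = -256  # stand-in value; asv defines its own in asv.util

    def fill_timed_out(results, errcode, benchmark):
        # Mirror of the added logic: only timing benchmarks that hit the
        # timeout get their missing samples replaced by the timeout itself.
        if errcode == TIMEOUT_RETCODE and benchmark["type"] == "time":
            timeout_limit = benchmark["timeout"]
            return [r if r is not None else timeout_limit for r in results]
        return results

    print(fill_timed_out([0.02, None], TIMEOUT_RETCODE,
                         {"type": "time", "timeout": 60.0}))
    # -> [0.02, 60.0]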

test/benchmark/params_examples.py

Lines changed: 11 additions & 0 deletions
@@ -94,6 +94,17 @@ def track_find_test(n):
 track_find_test.params = [1, 2]
 
 
+def time_find_test_timeout():
+    import asv_test_repo, time
+    if asv_test_repo.dummy_value[1] < 0:
+        time.sleep(100)
+
+time_find_test_timeout.timeout = 1.0
+time_find_test_timeout.repeat = 1
+time_find_test_timeout.number = 1
+time_find_test_timeout.warmup_time = 0
+
+
 def track_param_selection(a, b):
     return a + b
 
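The new fixture depends only on the generated test repo: it sleeps far past its 1.0 s timeout whenever the repo's dummy_value[1] is negative and returns immediately otherwise, and repeat, number and warmup_time are pinned so a healthy run finishes in a single quick sample. A rough, assumption-laden model of what one sample looks like to asv:

    import time

    def simulate_one_sample(dummy_value, timeout=1.0):
        # Hypothetical stand-in for a single asv timing sample of the fixture
        # above; asv itself runs the benchmark in a subprocess and kills it
        # after `timeout` seconds.
        start = time.time()
        sleep_for = 100 if dummy_value[1] < 0 else 0
        if sleep_for > timeout:
            return None  # what a timed-out sample effectively reports
        time.sleep(sleep_for)
        return time.time() - start

    print(simulate_one_sample((1, 0)))   # ~0.0 s on a healthy commit
    print(simulate_one_sample((1, -1)))  # None -> timed out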

test/test_benchmarks.py

Lines changed: 2 additions & 2 deletions
@@ -73,7 +73,7 @@ def test_discover_benchmarks(benchmarks_fixture):
     b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                        regex='example')
     conf.branches = old_branches
-    assert len(b) == 35
+    assert len(b) == 36
 
     b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                        regex='time_example_benchmark_1')
@@ -106,7 +106,7 @@ def test_discover_benchmarks(benchmarks_fixture):
     assert b._benchmark_selection['params_examples.track_param_selection'] == [0, 1, 2, 3]
 
     b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
-    assert len(b) == 49
+    assert len(b) == 50
 
     assert 'named.OtherSuite.track_some_func' in b

test/test_find.py

Lines changed: 39 additions & 9 deletions
@@ -12,21 +12,27 @@
 from asv.util import check_output, which
 
 from . import tools
-from .tools import dummy_packages
-from .test_workflow import basic_conf
+from .test_workflow import generate_basic_conf
 
 
 WIN = (os.name == 'nt')
 
 
-def test_find(capfd, basic_conf):
-    tmpdir, local, conf, machine_file = basic_conf
+def test_find(capfd, tmpdir):
+    values = [
+        (None, None),
+        (1, 1),
+        (3, 1),
+        (None, 1),
+        (6, None),
+        (5, 1),
+        (6, 1),
+        (6, 1),
+        (6, 6),
+        (6, 6),
+    ]
 
-    if WIN and os.path.basename(sys.argv[0]).lower().startswith('py.test'):
-        # Multiprocessing in spawn mode can result to problems with py.test
-        # Find.run calls Setup.run in parallel mode by default
-        pytest.skip("Multiprocessing spawn mode on Windows not safe to run "
-                    "from py.test runner.")
+    tmpdir, local, conf, machine_file = generate_basic_conf(tmpdir, values=values, dummy_packages=False)
 
     # Test find at least runs
     tools.run_asv_with_conf(conf, 'find', "master~5..master", "params_examples.track_find_test",
@@ -39,3 +45,27 @@ def test_find(capfd, basic_conf):
         [which('git'), 'rev-parse', 'master^'], cwd=conf.repo)
 
     assert "Greatest regression found: {0}".format(regression_hash[:8]) in output
+
+
+@pytest.mark.flaky(reruns=1, reruns_delay=5)  # depends on a timeout
+def test_find_timeout(capfd, tmpdir):
+    values = [
+        (1, 0),
+        (1, 0),
+        (1, -1)
+    ]
+
+    tmpdir, local, conf, machine_file = generate_basic_conf(tmpdir, values=values, dummy_packages=False)
+
+    # Test find at least runs
+    tools.run_asv_with_conf(conf, 'find', "-e", "master", "params_examples.time_find_test_timeout",
+                            _machine_file=machine_file)
+
+    # Check it found the first commit after the initially tested one
+    output, err = capfd.readouterr()
+
+    regression_hash = check_output(
+        [which('git'), 'rev-parse', 'master'], cwd=conf.repo)
+
+    assert "Greatest regression found: {0}".format(regression_hash[:8]) in output
+    assert "asv: benchmark timed out (timeout 1.0s)" in output
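Each tuple in values supplies the dummy_value pair for one commit of the generated test repo, so only the tip commit, (1, -1), trips the sleep in time_find_test_timeout and times out; with the timeout scored as 1.0 s, the largest jump in runtime lands on master, which is what the rev-parse assertion checks. A back-of-the-envelope check of that expectation (illustrative only, not asv's actual scoring):

    values = [(1, 0), (1, 0), (1, -1)]
    TIMEOUT = 1.0
    FAST = 0.001  # nominal runtime of the no-op branch; purely illustrative

    expected = [TIMEOUT if v[1] < 0 else FAST for v in values]
    worst = max(range(len(values)), key=lambda i: expected[i])
    print(worst)  # -> 2, the last commit, i.e. what `git rev-parse master` resolves to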

test/test_run.py

Lines changed: 8 additions & 1 deletion
@@ -4,6 +4,7 @@
 from __future__ import (absolute_import, division, print_function,
                         unicode_literals)
 
+import sys
 import os
 from os.path import join
 
@@ -20,7 +21,7 @@
 from asv.commands.run import Run
 
 from . import tools
-from .tools import dummy_packages
+from .tools import dummy_packages, WIN
 
 from .test_workflow import basic_conf, generate_basic_conf
 
@@ -323,6 +324,12 @@ def check_env_matrix(env_build, env_nobuild):
 def test_parallel(basic_conf, dummy_packages):
     tmpdir, local, conf, machine_file = basic_conf
 
+    if WIN and os.path.basename(sys.argv[0]).lower().startswith('py.test'):
+        # Multiprocessing in spawn mode can result to problems with py.test
+        # Find.run calls Setup.run in parallel mode by default
+        pytest.skip("Multiprocessing spawn mode on Windows not safe to run "
+                    "from py.test runner.")
+
     conf.matrix = {
         "req": dict(conf.matrix),
         "env": {"SOME_TEST_VAR": ["1", "2"]},

test/test_workflow.py

Lines changed: 16 additions & 19 deletions
@@ -25,21 +25,13 @@
 from .tools import dummy_packages, get_default_environment_type
 
 
-dummy_values = [
-    (None, None),
-    (1, 1),
-    (3, 1),
-    (None, 1),
-    (6, None),
-    (5, 1),
-    (6, 1),
+dummy_values = (
     (6, 1),
     (6, 6),
     (6, 6),
-]
-
+)
 
-def generate_basic_conf(tmpdir, repo_subdir=''):
+def generate_basic_conf(tmpdir, repo_subdir='', values=dummy_values, dummy_packages=True):
     tmpdir = six.text_type(tmpdir)
     local = abspath(dirname(__file__))
     os.chdir(tmpdir)
@@ -54,9 +46,17 @@ def generate_basic_conf(tmpdir, repo_subdir=''):
     shutil.copyfile(join(local, 'asv-machine.json'),
                     machine_file)
 
-    repo_path = tools.generate_test_repo(tmpdir, dummy_values,
+    repo_path = tools.generate_test_repo(tmpdir, values,
                                          subdir=repo_subdir).path
 
+    if dummy_packages:
+        matrix = {
+            "asv_dummy_test_package_1": [""],
+            "asv_dummy_test_package_2": tools.DUMMY2_VERSIONS,
+        }
+    else:
+        matrix = {}
+
     conf_dict = {
         'env_dir': 'env',
         'benchmark_dir': 'benchmark',
@@ -65,10 +65,7 @@ def generate_basic_conf(tmpdir, repo_subdir=''):
         'repo': relpath(repo_path),
         'dvcs': 'git',
         'project': 'asv',
-        'matrix': {
-            "asv_dummy_test_package_1": [""],
-            "asv_dummy_test_package_2": tools.DUMMY2_VERSIONS,
-        },
+        'matrix': matrix,
     }
     if repo_subdir:
         conf_dict['repo_subdir'] = repo_subdir
@@ -96,7 +93,7 @@ def test_run_publish(capfd, basic_conf):
     }
 
     # Tests a typical complete run/publish workflow
-    tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2',
+    tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
                             '--quick', '--show-stderr', '--profile',
                             '-a', 'warmup_time=0',
                             '--durations=5',
@@ -137,12 +134,12 @@ def test_run_publish(capfd, basic_conf):
 
     # Check that the skip options work
    capfd.readouterr()
-    tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2',
+    tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
                             '--quick', '--skip-existing-successful',
                             '--bench=time_secondary.track_value',
                             '--skip-existing-failed',
                             _machine_file=join(tmpdir, 'asv-machine.json'))
-    tools.run_asv_with_conf(conf, 'run', "master~5..master", '--steps=2',
+    tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
                             '--bench=time_secondary.track_value',
                             '--quick', '--skip-existing-commits',
                             _machine_file=join(tmpdir, 'asv-machine.json'))
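A usage note on the refactored helper (a sketch mirroring how the new test_find.py calls it, not part of the commit): tests can now feed their own per-commit values and skip the dummy-package matrix entirely.

    from .test_workflow import generate_basic_conf  # inside asv's test package

    def test_something(tmpdir):  # tmpdir is pytest's built-in fixture
        values = [(1, 0), (1, 0), (1, -1)]
        tmpdir, local, conf, machine_file = generate_basic_conf(
            tmpdir, values=values, dummy_packages=False)
        # conf points at a throwaway git repo built from `values`, and
        # machine_file is the copied asv-machine.json ready to pass to asv.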
