diff --git a/README.rst b/README.rst
index 8b1d150..26c1f82 100755
--- a/README.rst
+++ b/README.rst
@@ -12,9 +12,10 @@ Installation
Install the following:
-- Docker
-- Python 3
-- Numpy
+- Docker (tested with *17.09.0-ce*)
+- Python 3.5+ (tested with *3.5.2*)
+- ``numpy`` (tested with *1.13.3*)
+- ``wrk`` (tested with *4.0.0*)
Build the docker image containing the servers being tested by running
``./build.sh``.
@@ -22,3 +23,9 @@ Build the docker image containing the servers being tested by running
-The benchmarks can then be ran with ``./run_benchmarks``. Use
+The benchmarks can then be run with ``./run_benchmarks``. Use
``./run_benchmarks --help`` for various options, including selective
benchmark running.
+
+To run the ``http`` benchmarks and save the results to ``./results.html``:
+
+.. code::
+
+ ./run_benchmarks --duration=60 --save-html=results.html http
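+
+To run only a subset of the benchmarks of a given type, pass one or more
+regular expressions after the type; each pattern is matched against the
+benchmark name without its type prefix (``<pattern>`` below stands for any
+regular expression):
+
+.. code::
+
+   ./run_benchmarks http '<pattern>'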
diff --git a/build.sh b/build.sh
index 8e128c7..34473e3 100755
--- a/build.sh
+++ b/build.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-docker build -t magic/benchmark $(dirname $0)
+docker build --no-cache -t magic/benchmark "$(dirname "$0")"
diff --git a/run_benchmarks b/run_benchmarks
index e38641b..1ee1a5b 100755
--- a/run_benchmarks
+++ b/run_benchmarks
@@ -293,11 +293,15 @@ def server_container_exists():
def kill_server():
if server_is_running():
print('Shutting down server...')
- subprocess.check_output(['docker', 'stop', 'magicbench'])
-
- if server_container_exists():
- print('Removing server container...')
- subprocess.check_output(['docker', 'rm', 'magicbench'])
+ subprocess.check_output(['docker', 'stop', '-t10', 'magicbench'])
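+    # Poll until the container is gone; force-remove it after ~10 seconds
+    # of waiting and give up roughly a second after that.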
+ i = 0
+ while server_container_exists():
+ if i == 100:
+ subprocess.check_output(['docker', 'rm', '-f', 'magicbench'])
+ elif i == 110:
+            raise IOError('magicbench container could not be removed')
+ time.sleep(0.1)
+ i += 1
def format_report(data, target_file):
@@ -438,9 +442,11 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--duration', '-D', default=30, type=int,
help='duration of each benchmark in seconds')
- parser.add_argument('--benchmarks', type=str,
- help='comma-separated list of benchmarks to run ' +
- '(regular expressions are supported)')
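+    # The benchmark type is the prefix of a benchmark's name (the part
+    # before the first '-'); optional regex patterns filter within a type.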
+ parser.add_argument('type', type=str,
+ choices={b['name'].split('-', 1)[0] for b in benchmarks},
+ help='type of benchmark to run')
+ parser.add_argument('benchmark_patterns', type=str, nargs='*',
+ help='run benchmarks that match any of these regexes')
parser.add_argument('--concurrency-levels', type=int, default=[10],
nargs='+',
help='a list of concurrency levels to use')
@@ -457,11 +463,15 @@ def main():
if not os.path.exists(_socket):
os.mkdir(_socket)
- if args.benchmarks:
- benchmarks_to_run = [re.compile(b) for b in args.benchmarks.split(',')]
+ if args.benchmark_patterns:
+ patterns = [re.compile(p) for p in args.benchmark_patterns]
else:
- benchmarks_to_run = [re.compile(re.escape(b['name']))
- for b in benchmarks]
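+        # The empty pattern matches every benchmark name.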
+ patterns = [re.compile('')]
+ benchmarks_to_run = []
+ for benchmark in benchmarks:
+ type_, name = benchmark['name'].split('-', 1)
+ if type_ == args.type and any(p.match(name) for p in patterns):
+ benchmarks_to_run.append(benchmark)
benchmarks_data = []
@@ -470,7 +480,7 @@ def main():
for concurrency in sorted(args.concurrency_levels):
for msgsize in sorted(args.payload_size_levels):
variations.append({
- 'title': '{}kb messages, concurrency {}'.format(
+                    'title': '{} KiB messages, concurrency {}'.format(
round(msgsize / 1024, 1), concurrency
),
'concurrency': concurrency,
@@ -485,10 +495,7 @@ def main():
warmup = ['--msize=1024', '--duration=10',
'--concurrency={}'.format(warmup_concurrency)]
- for benchmark in benchmarks:
- if not any(b.match(benchmark['name']) for b in benchmarks_to_run):
- continue
-
+ for benchmark in benchmarks_to_run:
print(benchmark['title'])
print('=' * len(benchmark['title']))
print()
diff --git a/servers/requirements.txt b/servers/requirements.txt
index 539b598..c3fbd28 100644
--- a/servers/requirements.txt
+++ b/servers/requirements.txt
@@ -4,4 +4,4 @@ gevent==1.1.1
tornado==4.3
Twisted==16.1.1
httptools==0.0.9
-uvloop==0.4.28
+uvloop==0.8.1