4 | 4 | import shutil |
5 | 5 | import subprocess |
6 | 6 | import re |
| 7 | +import argparse |
7 | 8 | from datetime import datetime |
8 | 9 |
9 | | -I=1 |
10 | | -TOKENSDK_ROOT = "../../" |
| 10 | +parser = argparse.ArgumentParser(description="run_benchmark.py script") |
| 11 | +parser.add_argument( |
| 12 | + "--count", # use --count from the command line |
| 13 | + type=int, # expect an integer |
| 14 | + default=10, # default if not provided |
| 15 | + help="Number of repetitions (default: 10)" |
| 16 | +) |
| 17 | +parser.add_argument( |
| 18 | + "--timeout", # use --timeout from the command line |
| 19 | +    type=str,              # expect a string (a Go duration, e.g. "0" or "30m") |
| 20 | + default="0", # default if not provided |
| 21 | +    help="Timeout for the run (default: 0, i.e. no timeout)" |
| 22 | +) |
| 23 | +parser.add_argument( |
| 24 | + "--benchName", # use --benchName from the command line |
| 25 | + type=str, # expect a string |
| 26 | + default="", # default if not provided |
| 27 | + help="benchmark name to run (default is to run all benchmarks)" |
| 28 | +) |
| 29 | + |
| 30 | +args = parser.parse_args() |
| 31 | +count = args.count |
| 32 | +timeout = args.timeout |
| 33 | +benchName = args.benchName |
| 34 | + |
| 35 | +TOKENSDK_ROOT = os.environ.get("TOKENSDK_ROOT", "../../") |
11 | 36 | output_folder_path = "" |
12 | 37 | v1_benchmarks_folder = os.path.join(TOKENSDK_ROOT, "token/core/zkatdlog/nogh/v1") |
13 | 38 | transfer_benchmarks_folder = os.path.join(TOKENSDK_ROOT, "token/core/zkatdlog/nogh/v1/transfer") |
14 | 39 | issuer_benchmarks_folder = os.path.join(TOKENSDK_ROOT, "token/core/zkatdlog/nogh/v1/issue") |
15 | 40 | validator_benchmarks_folder = os.path.join(TOKENSDK_ROOT, "token/core/zkatdlog/nogh/v1/validator") |
16 | 41 |
| 42 | +I=1 |
| 43 | + |
17 | 44 | def run_and_parse_non_parallel_metrics(benchName, params, folder=transfer_benchmarks_folder) -> dict: |
18 | 45 | global I |
19 | 46 | global output_folder_path |
| 47 | + global count, timeout |
20 | 48 |
21 | 49 | if folder == "": |
22 | 50 | folder = transfer_benchmarks_folder |
23 | 51 |
24 | | - cmd = f"go test {folder} -run='^$' -bench={benchName} -v -benchmem -count=10 -cpu=1 -timeout 0 {params} | tee bench.txt; benchstat bench.txt" |
| 52 | + cmd = f"go test {folder} -run='^$' -bench={benchName} -v -benchmem -count={count} -cpu=1 -timeout {timeout} {params} | tee bench.txt; benchstat bench.txt" |
25 | 53 | print(f"{I} Running: {cmd}") |
26 | 54 | I = I+1 |
27 | 55 | result = subprocess.run( |
@@ -120,9 +148,10 @@ def run_and_parse_parallel_metrics(benchName, params, folder=transfer_benchmarks |
120 | 148 | folder = transfer_benchmarks_folder |
121 | 149 |
122 | 150 | global I |
| 151 | + global timeout |
123 | 152 | global output_folder_path |
124 | 153 |
125 | | - cmd = f"go test {folder} -test.run={benchName} -test.v -test.timeout 0 -bits='32' -num_inputs='2' -num_outputs='2' -workers='NumCPU' -duration='10s' -setup_samples=128 {params}" |
| 154 | + cmd = f"go test {folder} -test.run={benchName} -test.v -test.timeout {timeout} -bits='32' -num_inputs='2' -num_outputs='2' -workers='NumCPU' -duration='10s' -setup_samples=128 {params}" |
126 | 155 | print(f"{I} Running: {cmd}") |
127 | 156 | I = I+1 |
128 | 157 |
@@ -228,17 +257,21 @@ def append_dict_as_row(filename: str, data: dict): |
228 | 257 | print("\n*******************************************************") |
229 | 258 | print("Running non-parallel tests") |
230 | 259 |
231 | | -for test, params, benchType in non_parallel_tests: |
232 | | - results.update(run_and_parse_non_parallel_metrics(test, params, benchType)) |
| 260 | +for testName, params, benchType in non_parallel_tests: |
| 261 | + if (benchName == "") or (benchName == testName): |
| 262 | + results.update(run_and_parse_non_parallel_metrics(testName, params, benchType)) |
233 | 263 |
234 | 264 | print("\n*******************************************************") |
235 | 265 | print("Running parallel tests") |
236 | | -for test, params, folder in parallel_tests: |
237 | | - results.update(run_and_parse_parallel_metrics(test, params, folder)) |
| 266 | +for testName, params, folder in parallel_tests: |
| 267 | + if (benchName == "") or (benchName == testName): |
| 268 | + results.update(run_and_parse_parallel_metrics(testName, params, folder)) |
238 | 269 |
239 | 270 | # add new row to benchmark_results.csv and copy it to the output folder |
240 | | -append_dict_as_row("benchmark_results.csv", results) |
241 | | -src = os.path.join(".", "benchmark_results.csv") |
242 | | -dst = os.path.join(output_folder_path, "benchmark_results.csv") |
243 | | -if os.path.exists(src) and not os.path.exists(dst): |
244 | | - shutil.copy(src, dst) |
| 271 | +# but skip this when only a single benchmark was selected via --benchName |
| 272 | +if benchName == "": # we ran all the benchmarks |
| 273 | + append_dict_as_row("benchmark_results.csv", results) |
| 274 | + src = os.path.join(".", "benchmark_results.csv") |
| 275 | + dst = os.path.join(output_folder_path, "benchmark_results.csv") |
| 276 | + if os.path.exists(src) and not os.path.exists(dst): |
| 277 | + shutil.copy(src, dst) |
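
For reference, a possible invocation of the new flags (the script filename and the benchmark name below are illustrative assumptions; the flag names come from the argparse definitions added above):

    # hypothetical example: run a single benchmark, 5 repetitions, 30-minute go test timeout
    python run_benchmark.py --count 5 --timeout 30m --benchName BenchmarkTransfer

With an empty --benchName (the default) every entry in non_parallel_tests and parallel_tests runs and the results row is appended to benchmark_results.csv; when a single benchmark is named, only that one runs and the CSV is not updated.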