|
1 | 1 | import argparse |
2 | 2 | import subprocess |
3 | 3 | import os |
| 4 | +import shutil |
4 | 5 | import sys |
| 6 | +import json |
| 7 | +import csv |
5 | 8 | from pathlib import Path |
6 | 9 | from jinja2 import Environment, FileSystemLoader |
7 | 10 | from logikbench import * |
|
23 | 26 |
|
24 | 27 | """, formatter_class=argparse.RawDescriptionHelpFormatter) |
25 | 28 |
|
26 | | - parser.add_argument("-group", |
| 29 | + parser.add_argument("-group","-g", |
27 | 30 | nargs='+', |
28 | 31 | choices=['basic', 'memory', 'arithmetic', 'epfl', 'block'], |
29 | 32 | required=True, |
30 | 33 | help="Benchmark group") |
31 | | - parser.add_argument("-name", |
| 34 | + parser.add_argument("-name","-n", |
32 | 35 | nargs='+', |
33 | 36 | help="Benchmark name") |
34 | 37 | parser.add_argument("-tool", |
|
38 | 41 | parser.add_argument("-target", |
39 | 42 | required=True, |
40 | 43 | help="Compilation target") |
| 44 | + parser.add_argument('-clean','-c', |
| 45 | + action='store_true', |
| 46 | + help='Clean up build directory') |
| 47 | + parser.add_argument('-output','-o', |
| 48 | + default="build/results.json", |
| 49 | + help='Output file name') |
41 | 50 |
|
42 | 51 | args = parser.parse_args() |
43 | 52 |
|
|
49 | 58 | env = Environment(loader=FileSystemLoader('.')) |
50 | 59 | template = env.get_template(f'{args.tool}_template.j2') |
51 | 60 |
|
| 61 | + # aggregate results collected across all benchmark groups |
| 62 | + results = {} |
| 63 | + results["cells"] = {} |
| 64 | + |
52 | 65 | # iterate over all groups |
53 | 66 | for group in args.group: |
54 | 67 | group_path = rootdir / "logikbench" / group |
|
66 | 79 | script = f"{name}.tcl" |
67 | 80 | cmd = ['vivado', '-mode batch', '-source', script] |
68 | 81 |
|
| 82 | + # clean up old run |
| 83 | + if args.clean: |
| 84 | + shutil.rmtree(f"build/{group}/{name}") |
| 85 | + |
69 | 86 | # create run dir |
70 | 87 | os.makedirs(f"build/{group}/{name}", exist_ok=True) |
71 | 88 | os.chdir(f"build/{group}/{name}") |
72 | 89 |
|
73 | | - # get top module (not always equal to module name) |
| 90 | + # instance of benchmark class |
74 | 91 | mod = sys.modules[f"logikbench.{group}.{name}.{name}"] |
75 | | - cls = getattr(mod, name.capitalize()) |
76 | | - d = cls() |
77 | | - topmodule = d.get_topmodule(fileset='rtl') |
| 92 | + bobj = getattr(mod, name.capitalize()) |
| 93 | + b = bobj() |
78 | 94 |
|
79 | | - # write out fileset locally |
| 95 | + # get top module |
| 96 | + topmodule = b.get_topmodule(fileset='rtl') |
| 97 | + |
| 98 | + # write out design fileset |
80 | 99 | cmdfile = f"{name}.f" |
81 | | - d.write_fileset(cmdfile, fileset='rtl') |
| 100 | + b.write_fileset(cmdfile, fileset='rtl') |
82 | 101 |
|
83 | 102 | # create tool script |
84 | 103 | context = { |
|
92 | 111 | f.write(output) |
93 | 112 |
|
94 | 113 | # run benchmark |
95 | | - try: |
96 | | - print(f"Running {name} benchmark ({group}). Logfile: build/{group}/{name}/{name}.log") |
97 | | - with open(f'{name}.log', "w") as log: |
98 | | - result = subprocess.run(cmd, |
99 | | - stdout=log, |
100 | | - stderr=subprocess.STDOUT, |
101 | | - check=True) |
102 | | - |
103 | | - except subprocess.CalledProcessError as e: |
104 | | - print(f"Error...see logfile!!") |
| 114 | + if os.path.exists(f"{name}_stats.json"): |
| 115 | + print(f"Found previous results, skipping {name} benchmark ({group}).") |
| 116 | + else: |
| 117 | + try: |
| 118 | + print(f"Running {name} benchmark ({group}). Logfile: build/{group}/{name}/{name}.log") |
| 119 | + with open(f'{name}.log', "w") as log: |
| 120 | + result = subprocess.run(cmd, |
| 121 | + stdout=log, |
| 122 | + stderr=subprocess.STDOUT, |
| 123 | + check=True) |
| 124 | + |
| 125 | + except subprocess.CalledProcessError as e: |
| 126 | + print(f"Error...see logfile!!") |
| 127 | + |
| 128 | + # collect results |
| 129 | + if args.tool == 'yosys': |
| 130 | + with open(f"{name}_stats.json") as f: |
| 131 | + data = json.load(f) |
| 132 | + results["cells"][name] = data["design"]["num_cells"] |
105 | 133 |
|
106 | 134 | # go back home |
107 | 135 | os.chdir(scriptdir) |
| 136 | + |
| 137 | + |
| 138 | + # writing results to file |
| 139 | + _, ext = os.path.splitext(args.output) |
| 140 | + if ext == ".json": |
| 141 | + with open(args.output, "w") as f: |
| 142 | + json.dump(results, f, indent=2) |
| 143 | + elif ext == ".csv": |
| 144 | + all_rows = set() |
| 145 | + for col in results.values(): |
| 146 | + all_rows.update(col.keys()) |
| 147 | + all_rows = sorted(all_rows) |
| 148 | + columns = sorted(results.keys()) |
| 149 | + with open(args.output, "w", newline="") as f: |
| 150 | + writer = csv.writer(f) |
| 151 | + # Write header |
| 152 | + writer.writerow([""] + columns) |
| 153 | + # Write each row |
| 154 | + for row_key in all_rows: |
| 155 | + row = [row_key] |
| 156 | + for col_key in columns: |
| 157 | + row.append(results.get(col_key, {}).get(row_key, "")) |
| 158 | + writer.writerow(row) |