
Commit 34890d7

Initial import of BenchExec from CPAchecker repository
(using revision svn:15678 / git:702d580bd8)
1 parent d76c882 commit 34890d7

40 files changed: +8302 -0 lines changed

benchexec.py

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
#!/usr/bin/env python

"""
CPAchecker is a tool for configurable software verification.
This file is part of CPAchecker.

Copyright (C) 2007-2014 Dirk Beyer
All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


CPAchecker web page:
  http://cpachecker.sosy-lab.org
"""

# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals

import sys
sys.dont_write_bytecode = True  # prevent creation of .pyc files

"""
Main script of BenchExec for executing a whole benchmark (suite).

This script can be called from the command line.
For integrating from within Python instantiate the benchexec.BenchExec class
and either call "instance.start()" or "benchexec.main(instance)".
"""

import benchexec

benchexec.main(benchexec.BenchExec())
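
A minimal usage sketch of the two programmatic entry points named in the docstring above, instance.start() and benchexec.main(instance). The benchmark file name is a placeholder for an existing benchmark-definition XML file:

import benchexec

instance = benchexec.BenchExec()

# Variant 1: call start() directly and handle the return code yourself;
# the first argv element is skipped, like a program name.
return_code = instance.start(["benchexec", "my-benchmark.xml"])

# Variant 2: let main() install the SIGTERM handler, catch KeyboardInterrupt,
# and pass the result to sys.exit().
# benchexec.main(benchexec.BenchExec(), ["benchexec", "my-benchmark.xml"])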

benchexec/__init__.py

Lines changed: 298 additions & 0 deletions
@@ -0,0 +1,298 @@
"""
CPAchecker is a tool for configurable software verification.
This file is part of CPAchecker.

Copyright (C) 2007-2014 Dirk Beyer
All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


CPAchecker web page:
  http://cpachecker.sosy-lab.org
"""

# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals

import logging
import argparse
import os
import signal
import sys
import time

from .model import Benchmark
from . import util as util
from .outputhandler import OutputHandler

"""
Main module of BenchExec for executing a whole benchmark (suite).
To use it, instantiate the benchexec.BenchExec class
and either call "instance.start()" or "benchexec.main(instance)".

Naming conventions used within BenchExec:

TOOL: a (verification) tool that should be executed
EXECUTABLE: the executable file that should be called for running a TOOL
SOURCEFILE: one input file for the TOOL
RUN: one execution of a TOOL on one SOURCEFILE
RUNSET: a set of RUNs of one TOOL with at most one RUN per SOURCEFILE
RUNDEFINITION: a template for the creation of a RUNSET with RUNS from one or more SOURCEFILESETs
BENCHMARK: a list of RUNDEFINITIONs and SOURCEFILESETs for one TOOL
OPTION: a user-specified option to add to the command line of the TOOL when it is run
CONFIG: the configuration of this script consisting of the command-line arguments given by the user
EXECUTOR: a module for executing a BENCHMARK

"run" always denotes a job to do and is never used as a verb.
"execute" is only used as a verb (this is what is done with a run).
A benchmark or a run set can also be executed, which means to execute all contained runs.

Variables ending with "file" contain filenames.
Variables ending with "tag" contain references to XML tag objects created by the XML parser.
"""

class BenchExec(object):
    """
    The main class of BenchExec.
    It is designed to be extended by inheritance, and for example
    allows configuration options to be added and the executor to be replaced.
    By default, it uses an executor that executes all runs on the local machine.
    """

    def __init__(self):
        self.executor = None
        self.stopped_by_interrupt = False

    def start(self, argv):
        """
        Start BenchExec.
        @param argv: command-line options for BenchExec
        """
        parser = self.create_argument_parser()
        self.config = parser.parse_args(argv[1:])

        for arg in self.config.files:
            if not os.path.exists(arg) or not os.path.isfile(arg):
                parser.error("File {0} does not exist.".format(repr(arg)))

        if os.path.isdir(self.config.output_path):
            self.config.output_path = os.path.normpath(self.config.output_path) + os.sep

        self.setup_logging()

        self.executor = self.load_executor()

        returnCode = 0
        for arg in self.config.files:
            if self.stopped_by_interrupt:
                break
            logging.debug("Benchmark {0} is started.".format(repr(arg)))
            rc = self.execute_benchmark(arg)
            returnCode = returnCode or rc
            logging.debug("Benchmark {0} is done.".format(repr(arg)))

        logging.debug("I think my job is done. Have a nice day!")
        return returnCode


    def create_argument_parser(self):
        """
        Create a parser for the command-line options.
        May be overridden for adding more configuration options.
        @return: an argparse.ArgumentParser instance
        """
        parser = argparse.ArgumentParser(description=
            """Run benchmarks with a (verification) tool.
            Documented example files for the benchmark definitions
            can be found as 'doc/examples/benchmark*.xml'.
            Use the table-generator.py script to create nice tables
            from the output of this script.""")

        parser.add_argument("files", nargs='+', metavar="FILE",
                            help="XML file with benchmark definition")
        parser.add_argument("-d", "--debug",
                            action="store_true",
                            help="Enable debug output")

        parser.add_argument("-r", "--rundefinition", dest="selected_run_definitions",
                            action="append",
                            help="Run only the specified RUN_DEFINITION from the benchmark definition file. "
                                 + "This option can be specified several times.",
                            metavar="RUN_DEFINITION")

        parser.add_argument("-s", "--sourcefiles", dest="selected_sourcefile_sets",
                            action="append",
                            help="Run only the files from the sourcefiles tag with SOURCE as name. "
                                 + "This option can be specified several times.",
                            metavar="SOURCES")

        parser.add_argument("-n", "--name",
                            dest="name", default=None,
                            help="Set name of benchmark execution to NAME",
                            metavar="NAME")

        parser.add_argument("-o", "--outputpath",
                            dest="output_path", type=str,
                            default="./test/results/",
                            help="Output prefix for the generated results. "
                                 + "If the path is a folder, files are put into it, "
                                 + "otherwise it is used as a prefix for the resulting files.")

        parser.add_argument("-T", "--timelimit",
                            dest="timelimit", default=None,
                            help="Time limit in seconds for each run (-1 to disable)",
                            metavar="SECONDS")

        parser.add_argument("-M", "--memorylimit",
                            dest="memorylimit", default=None,
                            help="Memory limit in MB (-1 to disable)",
                            metavar="MB")

        parser.add_argument("-N", "--numOfThreads",
                            dest="num_of_threads", default=None, type=int,
                            help="Run n benchmarks in parallel",
                            metavar="n")

        parser.add_argument("-c", "--limitCores", dest="corelimit",
                            type=int, default=None,
                            metavar="N",
                            help="Limit each run of the tool to N CPU cores (-1 to disable).")

        parser.add_argument("--maxLogfileSize",
                            dest="maxLogfileSize", type=int, default=20,
                            metavar="MB",
                            help="Shrink logfiles to the given size in MB if they are too big (-1 to disable, default value: 20 MB).")

        parser.add_argument("--commit", dest="commit",
                            action="store_true",
                            help="If the output path is a git repository without local changes, "
                                 + "add and commit the result files.")

        parser.add_argument("--message",
                            dest="commit_message", type=str,
                            default="Results for benchmark run",
                            help="Commit message if --commit is used.")

        parser.add_argument("--startTime",
                            dest="start_time",
                            type=parse_time_arg,
                            default=None,
                            metavar="'YYYY-MM-DD hh:mm'",
                            help='Set the given date and time as the start time of the benchmark.')

        return parser

    def setup_logging(self):
        """
        Configure the logging framework.
        """
        if self.config.debug:
            logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                                level=logging.DEBUG)
        else:
            logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
                                level=logging.INFO)


    def load_executor(self):
        """
        Create and return the executor module that should be used for benchmarking.
        May be overridden for replacing the executor,
        for example with an implementation that delegates to some cloud service.
        """
        from . import localexecution as executor
        return executor


    def execute_benchmark(self, benchmark_file):
        """
        Execute a single benchmark as defined in a file.
        If called directly, ensure that config and executor attributes are set up.
        @param benchmark_file: the name of a benchmark-definition XML file
        @return: a result value from the executor module
        """
        benchmark = Benchmark(benchmark_file, self.config,
                              self.config.start_time or time.localtime())
        self.check_existing_results(benchmark)

        self.executor.init(self.config, benchmark)
        output_handler = OutputHandler(benchmark, self.executor.get_system_info())

        logging.debug("I'm benchmarking {0} consisting of {1} run sets.".format(
                repr(benchmark_file), len(benchmark.run_sets)))

        result = self.executor.execute_benchmark(benchmark, output_handler)

        if self.config.commit and not self.stopped_by_interrupt:
            util.add_files_to_git_repository(self.config.output_path,
                                             output_handler.all_created_files,
                                             self.config.commit_message + '\n\n' + output_handler.description)
        return result


    def check_existing_results(self, benchmark):
        """
        Check and abort if the target directory for the benchmark results
        already exists in order to avoid overwriting results.
        """
        if os.path.exists(benchmark.log_folder):
            # we refuse to overwrite existing results
            sys.exit('Output directory {0} already exists, will not overwrite existing results.'.format(benchmark.log_folder))


    def stop(self):
        """
        Stop the execution of a benchmark.
        This instance cannot be used anymore afterwards.
        Timely termination is not guaranteed, and this method may return before
        everything is terminated.
        """
        self.stopped_by_interrupt = True

        if self.executor:
            self.executor.stop()


def parse_time_arg(s):
    """
    Parse a time stamp in the "year-month-day hour-minute" format.
    """
    try:
        return time.strptime(s, "%Y-%m-%d %H:%M")
    except ValueError as e:
        raise argparse.ArgumentTypeError(e)


def signal_handler_ignore(signum, frame):
    """
    Log and ignore all signals.
    """
    logging.warn('Received signal %d, ignoring it' % signum)


def main(benchexec, argv=None):
    """
    The main method of BenchExec for use in a command-line script.
    In addition to calling benchexec.start(argv),
    it also handles signals and keyboard interrupts.
    It does not return but calls sys.exit().
    @param benchexec: An instance of BenchExec for executing benchmarks.
    @param argv: optionally the list of command-line options to use
    """
    # ignore SIGTERM
    signal.signal(signal.SIGTERM, signal_handler_ignore)
    try:
        sys.exit(benchexec.start(argv or sys.argv))
    except KeyboardInterrupt:  # reached when the interrupt occurs before or after a run-set execution
        benchexec.stop()
        util.printOut("\n\nScript was interrupted by user, some runs may not be done.")
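
The BenchExec class above is documented as designed for extension by inheritance: create_argument_parser() can be overridden to add options and load_executor() to replace the executor. A minimal sketch of such a subclass, assuming a hypothetical module my_executor that provides the interface the code above expects from an executor (init(), get_system_info(), execute_benchmark(), stop()); the --myOption flag is likewise hypothetical:

import benchexec


class MyBenchExec(benchexec.BenchExec):

    def create_argument_parser(self):
        # reuse the default command-line options and add a custom (hypothetical) flag
        parser = super(MyBenchExec, self).create_argument_parser()
        parser.add_argument("--myOption", dest="my_option", action="store_true",
                            help="hypothetical extra option")
        return parser

    def load_executor(self):
        # replace the local executor with a module offering the same interface:
        # init(), get_system_info(), execute_benchmark(), and stop()
        import my_executor  # hypothetical module
        return my_executor


if __name__ == "__main__":
    benchexec.main(MyBenchExec())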
