Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,13 @@ include(CTest)
# Enable CTest
enable_testing()

# Enables LLVM builtin functions for memory operations. We do not check these for memory safety.
# To check for memory safety, set this option to OFF; our wrappers, which perform the
# memory-safety checks before calling the builtin functions directly, will be used instead.
option(SEA_LLVM_MEM_BUILTINS "Use LLVM builtins memcpy/memmove without memory safety checks" OFF)

option(SEA_ENABLE_CRAB "Enable Seahorn using Clam/Crab" OFF)

if (CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR )
message (FATAL_ERROR
"In-source builds are not allowed. Please clean your source tree and try again.")
Expand Down
92 changes: 92 additions & 0 deletions scripts/get_exper_brunch_stat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import re
import csv
import argparse
from collections import defaultdict

BRUNCH_DICT = {  # Maps csv column name -> marker looked for in BRUNCH_STAT log lines
    "job_name": "Test: ",
    "result": "Result",
    "bmc.circ_sz": "bmc.circ_sz",
    "bmc.dag_sz": "bmc.dag_sz",
    "opsem.isderef.not.solve": "crab.isderef.not.solve",  # for crab
    "opsem.isderef.solve": "crab.isderef.solve",  # for crab
    "pp.isderef.not.solve": "crab.pp.isderef.not.solve",
    "pp.isderef.solve": "crab.pp.isderef.solve",
    "bmc_time": "BMC",
    "bmc_solve_time": "BMC.solve",
    "opsem_crab_time": "opsem.crab",
    "pp_crab_time": "seapp.crab",
    "opsem_assert_time": "opsem.assert",
    "opsem_simplify_time": "opsem.simplify",
    "seahorn_total_time": "seahorn_total",
    "object_domain_use_odi": "count.move_object",
    "opsem_crab_solve_isderef": "opsem.time.crab.isderef"
    # ADD additional column and corresponding pattern on outputs here
}

# Suffix stripped from test names when building row keys.
_TEST_NAME_SUFFIX = "_unsat_test"


def move_dict_items_to_lst(dt):
    """Return *dt*'s stat values ordered by BRUNCH_DICT columns (job_name excluded).

    Stats missing from *dt* are filled with 0 so every row has the same
    width as the csv header.
    """
    return [dt.get(key, 0) for key in BRUNCH_DICT if key != "job_name"]


def read_brunchstat_from_log(log_file_name, use_crab=False):
    """Parse a ctest log and collect BRUNCH_STAT values for each test.

    :param log_file_name: path of the .log file to scan
    :param use_crab: unused; kept for backward compatibility with callers
        that pass it (e.g. collect_stat_from_ctest_log in get_exper_res.py)
    :return: dict mapping test name -> list of stats in BRUNCH_DICT order
    """
    data = {}
    row_dict = {}
    cur_test = None
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(log_file_name, 'r') as log_file:
        for line in log_file:
            new_test = re.search(BRUNCH_DICT["job_name"], line)
            if new_test:
                # Flush the previous test's stats before starting a new one.
                if cur_test:
                    data[cur_test] = move_dict_items_to_lst(row_dict)
                row_dict = {}
                test_name = line[new_test.end():].strip()
                # Strip the suffix only when it is actually present, instead
                # of blindly slicing 12 chars (which mangled other names).
                if test_name.endswith(_TEST_NAME_SUFFIX):
                    test_name = test_name[:-len(_TEST_NAME_SUFFIX)]
                cur_test = test_name
            elif line.startswith("BRUNCH_STAT"):
                stat = line.split()
                stat_name, stat_num = stat[-2], stat[-1]
                for key, pattern in BRUNCH_DICT.items():
                    # Exact match first; also accept stat names that merely
                    # contain the pattern (as the original matching did).
                    if stat_name == pattern or pattern in stat_name:
                        row_dict[key] = stat_num
    # Flush the last test, which has no following "Test: " header.
    if cur_test:
        data[cur_test] = move_dict_items_to_lst(row_dict)
    return data


def write_brunchstat_into_csv(data, out_file):
    """Write collected stats to *out_file*: a header row, then one row per test."""
    print(out_file)
    with open(out_file, 'w+', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(list(BRUNCH_DICT.keys()))
        for test_name, stats in data.items():
            writer.writerow([test_name, *stats])


if __name__ == "__main__":
    # Command-line entry point: parse the log path and optional csv destination,
    # then convert the ctest log into a brunch-stat csv.
    parser = argparse.ArgumentParser(
        description='in and out files')
    parser.add_argument('logfile', type=str, help=".log file to read from")
    parser.add_argument(
        '--outfile',
        '-o',
        type=str,
        default="brunch_stat.csv",
        help="output csv filename")  # fixed typo: was "ouput"
    args = parser.parse_args()
    data = read_brunchstat_from_log(args.logfile)
    write_brunchstat_into_csv(data, args.outfile)
143 changes: 143 additions & 0 deletions scripts/get_exper_res.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
import xml.etree.ElementTree as ET
import sys
import os
import csv
import glob
import subprocess
import argparse
from get_exper_brunch_stat import read_brunchstat_from_log, write_brunchstat_into_csv

BUILDABSPATH = os.path.abspath('../build/')  # default build dir; rebound by make_build_path()
DATAABSPATH = os.path.abspath('../') + "/data"  # destination for result csv files
SEAHORN_ROOT = '../../build-seahorn-rgn'  # Put your seahorn root dir here
# Maps a verification-flag string to the csv filename for its results.
# NOTE(review): FILE_DICT appears unused in this script — confirm before removing.
FILE_DICT = {
    "": "seahorn.csv",
    "--vac": "seahorn(vac).csv",
    "--horn-bmc-solver=smt-y2": "seahorn(smt-y2).csv",
    "--cex": "seahorn(cex).csv",
    "--cex --horn-bmc-solver=smt-y2": "seahorn(cex, smt-y2).csv",
    "klee": "klee.csv"}


def extra_to_filename(extra, suffix='.csv', prefix=''):
    '''Convert extra flags like ``--a=b --c=d`` into ``<prefix>a_b_c_d<suffix>``.

    With no extra flags the baseline name ``<prefix>base<suffix>`` is used.
    Flags not starting with ``--`` are ignored.

    :param extra: list of command-line flag strings
    :param suffix: filename suffix, including the extension dot
    :param prefix: filename prefix
    '''
    if not extra:
        # Honor the prefix here too, so the baseline name is consistent
        # with the flagged variants (it was previously dropped).
        return f'{prefix}base{suffix}'
    parts = []
    for flag in extra:
        if flag.startswith('--'):
            parts.extend(flag[2:].split('='))
    return f'{prefix}{"_".join(parts)}{suffix}'


def make_build_path(extra, build_path='../build'):
    """Create (if needed) a per-configuration build dir and point BUILDABSPATH at it.

    :param extra: extra verification flags; encoded into the directory name
    :param build_path: base directory path to extend
    """
    build_path += extra_to_filename(extra, '', '_') if extra else '_base'
    print(f'[Build Path] {build_path}')
    # makedirs + exist_ok avoids the exists()/mkdir() check-then-create race
    # and also creates any missing parent directories.
    os.makedirs(build_path, exist_ok=True)
    global BUILDABSPATH
    BUILDABSPATH = os.path.abspath(build_path)


def get_output_level():
    """Where child-process stdout goes: inherited stdout in --debug mode, else a pipe."""
    return sys.stdout if args.debug else subprocess.PIPE


def make_new_cmake_conf():
    # Build the cmake configure command line; SEA_ENABLE_CRAB follows the
    # presence of "--crab" in the module-level `extra` flag list.
    # NOTE(review): the backslash continuations are inside the f-string, so
    # the embedded spacing is part of the returned command — do not reformat.
    use_crab = "ON" if "--crab" in extra else "OFF"
    return f'cmake -DSEA_LINK=llvm-link-10 -DCMAKE_C_COMPILER=clang-10\
 -DCMAKE_CXX_COMPILER=clang++-10 -DSEA_ENABLE_CRAB={use_crab}\
 -DSEAHORN_ROOT={SEAHORN_ROOT} -DTRUSTY_TARGET=x86_64 ../ -GNinja'


def read_data_from_xml(res_data):
    """Append ``[name, execution-time, status]`` rows to *res_data*.

    Reads the first Test.xml found (recursively) under BUILDABSPATH.
    """
    pattern = "{path}/**/Test.xml".format(path=BUILDABSPATH)
    report = glob.glob(pattern, recursive=True)[0]
    testing = ET.parse(report).getroot().find('Testing')
    for test in testing.findall('Test'):
        name = test.find('Name').text
        status = test.get('Status')
        for measurement in test.find('Results').findall('NamedMeasurement'):
            if measurement.attrib['name'] == 'Execution Time':
                elapsed = measurement.find('Value').text
                res_data.append([name, elapsed, status])


def write_data_into_csv(file_name, res_data):
    """Dump result rows to *file_name* with a fixed Name/Timing/Result header."""
    with open(file_name, 'w+', newline='') as csvfile:
        csv.writer(csvfile).writerows(
            [["Name", "Timing", "Result"], *res_data])


def collect_res_from_ctest(file_name):
    """Harvest timings from the latest ctest XML report and save them under ../data."""
    rows = []
    read_data_from_xml(rows)
    dest = "{dir}/{file}".format(dir="../data", file=file_name)
    write_data_into_csv(dest, rows)
    print("[Results] Please find result csv file at: ../data/%s" % file_name)


def run_ctest_for_seahorn():
    # Configure, build and run the experimental ctest suite in a fresh build
    # dir, then collect both the timing csv and the brunch-stat csv.
    # Reads module globals: `extra` (forwarded flags), `args` (--debug,
    # --timeout) and, after make_build_path(), BUILDABSPATH.
    print("[SeaHorn] Start making SeaHorn results...")
    set_env = ''
    if extra and len(extra) > 0:
        # Forward the extra flags to the test harness via VERIFY_FLAGS.
        verify_flags = " ".join(extra)
        print(f'Run SeaHorn with extra configs: {verify_flags} ')
        set_env = f'env VERIFY_FLAGS=\"{verify_flags}\"'
    cmake_conf = make_new_cmake_conf()
    # Commands run in order inside one bash session: wipe the build dir,
    # configure, build, then run ctest with a per-test timeout.
    command_lst = ["rm -rf *", cmake_conf, "ninja",
                   f'{set_env} ctest -j{os.cpu_count()} -D ExperimentalTest -R . --timeout {args.timeout}']
    make_build_path(extra)  # rebinds the BUILDABSPATH global
    cddir = "cd " + BUILDABSPATH
    for strcmd in command_lst:
        cddir += " ; " + strcmd
    if args.debug:
        print(f'[Command] {cddir}')
    # The combined command string is fed to bash on stdin; stdout is piped
    # unless --debug (see get_output_level()).
    process = subprocess.Popen(
        '/bin/bash',
        shell=True,
        stdin=subprocess.PIPE,
        stdout=get_output_level())
    _ = process.communicate(cddir.encode())
    collect_res_from_ctest(extra_to_filename(extra))
    collect_stat_from_ctest_log(extra_to_filename(extra, suffix='.brunch.csv'),
                                True if "--crab" in extra else False)


def collect_stat_from_ctest_log(outfile, use_crab):
    """Find the newest LastTest_* ctest log and export its BRUNCH stats as csv.

    :param outfile: csv filename, written under DATAABSPATH
    :param use_crab: forwarded to read_brunchstat_from_log
    """
    test_tmpdir = os.path.join(BUILDABSPATH, 'Testing', 'Temporary')
    logfiles = [f for f in os.listdir(test_tmpdir)
                if os.path.isfile(os.path.join(test_tmpdir, f))
                and f.startswith("LastTest_")]
    # Pick the most recently modified log. os.listdir order is arbitrary,
    # so the previous logfiles[0] did not actually select the latest run.
    latest_log = max(
        logfiles,
        key=lambda f: os.path.getmtime(os.path.join(test_tmpdir, f)))
    print("collecting brunch stat from " + latest_log)
    data = read_brunchstat_from_log(
        os.path.join(test_tmpdir, latest_log), use_crab)
    outpath = os.path.join(DATAABSPATH, outfile)
    write_brunchstat_into_csv(data, outpath)


def main():
    """Ensure the data and build directories exist, then drive the ctest run."""
    for path in (DATAABSPATH, BUILDABSPATH):
        os.makedirs(path, exist_ok=True)
    run_ctest_for_seahorn()


if __name__ == "__main__":
    # Args for this script. Known flags (--debug, --timeout) configure this
    # driver; everything unrecognized is collected into `extra` and forwarded
    # to the verification run (see run_ctest_for_seahorn).
    parser = argparse.ArgumentParser(
        description='Present flags to decide which tool will be tested.')
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--timeout', type=int, default=2000,
                        help='Seconds before timeout for each test')
    # NOTE: `args` and `extra` become module-level globals read by the
    # functions above.
    args, extra = parser.parse_known_args()
    main()
Loading